Dataset columns (name: type, observed length/value range):

entry_point: string, length 1 to 65
original_triton_code: string, length 4.5k to 619k
python_code: string, length 208 to 60.9k
triton_code: string, length 1.15k to 275k
repo_name: string, length 7 to 115
module_name: string, length 1 to 65
synthetic: bool, 1 class
uuid: int64, 0 to 18.5k
licenses: sequence, length 1 to 6
stars: int64, 0 to 19.8k
sha: string, length 40
repo_link: string, length 72 to 180
pytorch_code: string, length 200 to 4.05k
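For orientation, here is a minimal sketch of iterating over rows with these columns using the Hugging Face datasets library. The dataset ID "your-org/pytorch-triton-pairs" is a hypothetical placeholder, since this excerpt does not name the dataset; substitute the real one.

# Sketch only: stream a few rows and dump each PyTorch/Triton pair to disk.
# The dataset ID below is a hypothetical placeholder.
from pathlib import Path

from datasets import load_dataset  # pip install datasets

ds = load_dataset("your-org/pytorch-triton-pairs", split="train", streaming=True)

out = Path("pairs")
out.mkdir(exist_ok=True)
for i, row in enumerate(ds):
    if i >= 3:  # just a few rows for illustration
        break
    stem = f"{row['uuid']}_{row['entry_point']}"
    # python_code holds the original nn.Module; triton_code the cleaned lowering.
    (out / f"{stem}_eager.py").write_text(row["python_code"])
    (out / f"{stem}_triton.py").write_text(row["triton_code"])
    print(stem, row["repo_name"], row["licenses"], sep="\t")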
entry_point: NormalAttention_dot
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_6/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
# Graph fragment:
#   %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_6/inductor_cache/2x/c2xwbpm4ch2balahokzyrd5zmtndolk2v733exjzhzhai6vm7yxw.py
# Topologically Sorted Source Nodes: [energy_1, energy_2], Original ATen: [aten.elu, aten.div]
# Source node to ATen node mapping:
#   energy_1 => expm1, gt, mul, mul_2, where
#   energy_2 => div
# Graph fragment:
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%bmm, 0), kwargs = {})
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1.0), kwargs = {})
#   %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%where, 16), kwargs = {})
triton_poi_fused_div_elu_2 = async_compile.triton('triton_poi_fused_div_elu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_elu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_elu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp8 = 0.0625
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (1, ), (1, ))
    assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 0, 1), 0), out=buf4)
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
        buf6 = buf5; del buf5  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf6, primals_7, 256, grid=grid(256), stream=stream0)
        del primals_7
        buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [energy_1, energy_2], Original ATen: [aten.elu, aten.div]
        triton_poi_fused_div_elu_2.run(buf4, buf7, 1024, grid=grid(1024), stream=stream0)
        buf8 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [energy_1, energy_2, bmm_1], Original ATen: [aten.elu, aten.div, aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1), 0), buf7, out=buf8)
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf9 = extern_kernels.convolution(reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
        buf10 = buf9; del buf9  # reuse
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf10, primals_9, 256, grid=grid(256), stream=stream0)
        del primals_9
    return (buf10, primals_1, primals_2, primals_4, primals_6, primals_8, buf4, reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 16), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn


class NormalAttention_dot(nn.Module):

    def __init__(self, input_channel_num, k=4):
        super(NormalAttention_dot, self).__init__()
        self.c_in = input_channel_num
        self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)
        self.nonlin = nn.ELU()
        self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)

    def forward(self, x):
        B, C, H, W = x.size()
        proj_query = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1)
        proj_key = self.key_conv(x).view(B, -1, H * W)
        energy = torch.bmm(proj_query, proj_key)
        energy = self.nonlin(energy)
        energy = energy / (H * W)
        proj_value = self.value_conv(x).view(B, -1, H * W)
        out = torch.bmm(proj_value, energy).view(B, C, H, W)
        out = self.gamma(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channel_num': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_div_elu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp8 = 0.0625
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
        del buf1
        triton_poi_fused_convolution_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 0, 1), 0), out=buf4)
        buf5 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
        buf6 = buf5
        del buf5
        triton_poi_fused_convolution_1[grid(256)](buf6, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        triton_poi_fused_div_elu_2[grid(1024)](buf4, buf7, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 16), (64, 16, 1), 0), buf7, out=buf8)
        buf9 = extern_kernels.convolution(reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1))
        buf10 = buf9
        del buf9
        triton_poi_fused_convolution_1[grid(256)](buf10, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
    return (buf10, primals_1, primals_2, primals_4, primals_6, primals_8,
        buf4, reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(buf6, (4, 16, 4), (64, 1, 16), 0),
        reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0),
        reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0),
        reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 16), 0))


class NormalAttention_dotNew(nn.Module):

    def __init__(self, input_channel_num, k=4):
        super(NormalAttention_dotNew, self).__init__()
        self.c_in = input_channel_num
        self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)
        self.nonlin = nn.ELU()
        self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)

    def forward(self, input_0):
        primals_2 = self.query_conv.weight
        primals_3 = self.query_conv.bias
        primals_4 = self.key_conv.weight
        primals_5 = self.key_conv.bias
        primals_6 = self.value_conv.weight
        primals_7 = self.value_conv.bias
        primals_8 = self.gamma.weight
        primals_9 = self.gamma.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
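As an aside, the fused ELU-then-divide step above (triton_poi_fused_div_elu_2) can be reproduced as a small standalone kernel. The following is a minimal sketch of ours, not part of the dataset; it assumes an installed triton and a CUDA device, and all names in it are our own.

# Minimal standalone rewrite of the fused ELU + divide-by-(H*W) pattern,
# checked against the eager equivalent. Kernel and launcher names are ours.
import torch
import triton
import triton.language as tl


@triton.jit
def elu_div_kernel(in_ptr, out_ptr, n_elements, scale, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    x = tl.load(in_ptr + offs, mask=mask)
    # ELU with alpha=1: x if x > 0 else expm1(x); exp(x) - 1 matches expm1
    # up to rounding, which stays within assert_close's default tolerances.
    y = tl.where(x > 0, x, tl.exp(x) - 1.0) * scale
    tl.store(out_ptr + offs, y, mask=mask)


x = torch.randn(4, 16, 16, device="cuda")
out = torch.empty_like(x)
n = x.numel()
elu_div_kernel[(triton.cdiv(n, 256),)](x, out, n, 1.0 / 16, BLOCK=256)
ref = torch.nn.functional.elu(x) / 16
torch.testing.assert_close(out, ref)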
repo_name: Schwartz-Zha/My-invertible-resnet
module_name: NormalAttention_dot
synthetic: false
uuid: 1,037
licenses: ["MIT"]
stars: 0
sha: 5415975bb0d640f3bf3ef4a7b986563e84109270
repo_link: https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270
pytorch_code:

import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, input_channel_num, k=4):
        super().__init__()
        self.c_in = input_channel_num
        self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)
        self.nonlin = nn.ELU()
        self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)

    def forward(self, x):
        B, C, H, W = x.size()
        proj_query = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1)
        proj_key = self.key_conv(x).view(B, -1, H * W)
        energy = torch.bmm(proj_query, proj_key)
        energy = self.nonlin(energy)
        energy = energy / (H * W)
        proj_value = self.value_conv(x).view(B, -1, H * W)
        out = torch.bmm(proj_value, energy).view(B, C, H, W)
        out = self.gamma(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
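A natural way to use such a row is to check that the generated wrapper matches the eager module numerically. A minimal sketch of ours, assuming the NormalAttention_dot and NormalAttention_dotNew classes above have been saved into importable modules and a CUDA device is available:

# Sanity-check sketch (not part of the dataset): copy the eager module's
# weights into the Inductor-generated wrapper and compare outputs on CUDA.
import torch

eager = NormalAttention_dot(input_channel_num=4).cuda().eval()
lowered = NormalAttention_dotNew(input_channel_num=4).cuda().eval()
lowered.load_state_dict(eager.state_dict())  # identical parameters

x = torch.rand(4, 4, 4, 4, device="cuda")
with torch.no_grad():
    torch.testing.assert_close(lowered(x), eager(x), rtol=1e-4, atol=1e-4)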
entry_point: ActNorm2D
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_6/inductor_cache/mk/cmkfq3mg2sl66skmhudljmlrjckeersi3ye3frqm2bl7felzsn2n.py
# Topologically Sorted Source Nodes: [sum_1, mul, logdet], Original ATen: [aten.sum, aten.mul]
# Source node to ATen node mapping:
#   logdet => mul_1
#   mul => mul
#   sum_1 => sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%unsqueeze_2,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 4), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 4), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 4.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 * tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')

# kernel path: runs/run_shard_6/inductor_cache/ok/cok2tne7527mk7p2rdtta5caje65jdloa27vp4xewaco2m24lfgp.py
# Topologically Sorted Source Nodes: [exp, mul_2, add], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
#   add => add
#   exp => exp
#   mul_2 => mul_2
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%unsqueeze_2,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_5), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp3 = tmp0 * tmp2
    tmp5 = tmp3 + tmp4
    tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, ), (1, ))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [sum_1, mul, logdet], Original ATen: [aten.sum, aten.mul]
        stream0 = get_raw_stream(0)
        triton_per_fused_mul_sum_0.run(buf2, primals_1, 1, 4, grid=grid(1), stream=stream0)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [exp, mul_2, add], Original ATen: [aten.exp, aten.mul, aten.add]
        triton_poi_fused_add_exp_mul_1.run(primals_2, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
        del primals_3
    return (buf1, buf2, primals_1, primals_2, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
from torch.nn import Parameter


class ActNorm2D(nn.Module):

    def __init__(self, num_channels, eps=1e-05):
        super(ActNorm2D, self).__init__()
        self.eps = eps
        self.num_channels = num_channels
        self._log_scale = Parameter(torch.Tensor(num_channels))
        self._shift = Parameter(torch.Tensor(num_channels))
        self._init = False

    def log_scale(self):
        return self._log_scale[None, :, None, None]

    def shift(self):
        return self._shift[None, :, None, None]

    def forward(self, x):
        if not self._init:
            with torch.no_grad():
                assert self.num_channels == x.size(1)
                mean = torch.transpose(x, 0, 1).contiguous().view(self.num_channels, -1).mean(dim=1)
                zero_mean = x - mean[None, :, None, None]
                var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(self.num_channels, -1).mean(dim=1)
                std = (var + self.eps) ** 0.5
                log_scale = torch.log(1.0 / std)
                self._shift.data = -mean * torch.exp(log_scale)
                self._log_scale.data = log_scale
                self._init = True
        log_scale = self.log_scale()
        logdet = log_scale.sum() * x.size(2) * x.size(3)
        return x * torch.exp(log_scale) + self.shift(), logdet

    def inverse(self, x):
        return (x - self.shift()) * torch.exp(-self.log_scale())


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_channels': 4}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 4.0
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 * tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)


@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp3 = tmp0 * tmp2
    tmp5 = tmp3 + tmp4
    tl.store(out_ptr0 + x3, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mul_sum_0[grid(1)](buf2, primals_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1, primals_3, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
    return buf1, buf2, primals_1, primals_2


class ActNorm2DNew(nn.Module):

    def __init__(self, num_channels, eps=1e-05):
        super(ActNorm2DNew, self).__init__()
        self.eps = eps
        self.num_channels = num_channels
        self._log_scale = Parameter(torch.Tensor(num_channels))
        self._shift = Parameter(torch.Tensor(num_channels))
        self._init = False

    def log_scale(self):
        return self._log_scale[None, :, None, None]

    def shift(self):
        return self._shift[None, :, None, None]

    def inverse(self, x):
        return (x - self.shift()) * torch.exp(-self.log_scale())

    def forward(self, input_0):
        primals_1 = self._log_scale
        primals_3 = self._shift
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
repo_name: Schwartz-Zha/My-invertible-resnet
module_name: ActNorm2D
synthetic: false
uuid: 1,038
licenses: ["MIT"]
stars: 0
sha: 5415975bb0d640f3bf3ef4a7b986563e84109270
repo_link: https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270
pytorch_code:

import torch
import torch.nn as nn
from torch.nn import Parameter


class Model(nn.Module):

    def __init__(self, num_channels, eps=1e-05):
        super().__init__()
        self.eps = eps
        self.num_channels = num_channels
        self._log_scale = Parameter(torch.Tensor(num_channels))
        self._shift = Parameter(torch.Tensor(num_channels))
        self._init = False

    def log_scale(self):
        return self._log_scale[None, :, None, None]

    def shift(self):
        return self._shift[None, :, None, None]

    def forward(self, x):
        if not self._init:
            with torch.no_grad():
                assert self.num_channels == x.size(1)
                mean = torch.transpose(x, 0, 1).contiguous().view(self.num_channels, -1).mean(dim=1)
                zero_mean = x - mean[None, :, None, None]
                var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(self.num_channels, -1).mean(dim=1)
                std = (var + self.eps) ** 0.5
                log_scale = torch.log(1.0 / std)
                self._shift.data = -mean * torch.exp(log_scale)
                self._log_scale.data = log_scale
                self._init = True
        log_scale = self.log_scale()
        logdet = log_scale.sum() * x.size(2) * x.size(3)
        return x * torch.exp(log_scale) + self.shift(), logdet

    def inverse(self, x):
        return (x - self.shift()) * torch.exp(-self.log_scale())


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
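A small, CPU-only behavioral sketch of the eager ActNorm2D above (ours, not from the repo): the first forward pass performs the data-dependent initialization, so the init batch comes out per-channel standardized, the returned logdet equals sum(log_scale) * H * W, and inverse() undoes forward().

# Behavioral sketch of ActNorm2D as defined above (pure PyTorch, CPU).
import torch

actnorm = ActNorm2D(num_channels=4)
x = torch.rand(8, 4, 4, 4) * 3.0 + 1.0

y, logdet = actnorm(x)  # triggers data-dependent init

flat = y.transpose(0, 1).reshape(4, -1)
print(flat.mean(dim=1))                   # ~0 per channel
print(flat.std(dim=1, unbiased=False))    # ~1 per channel

# logdet is sum(log_scale) * H * W, a single scalar shared by all samples.
expected = actnorm._log_scale.sum() * x.size(2) * x.size(3)
torch.testing.assert_close(logdet, expected)

# Round-trip through the inverse.
torch.testing.assert_close(actnorm.inverse(y), x, rtol=1e-5, atol=1e-5)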
entry_point: NormalAttention_embedded_gaussian
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_6/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_6/inductor_cache/63/c63abizbceybowmxasyhbvyg6gmcs7m552tgg653wdhaqei77hz3.py
# Topologically Sorted Source Nodes: [energy_1, energy_sum, energy_2], Original ATen: [aten.exp, aten.sum, aten.div]
# Source node to ATen node mapping:
#   energy_1 => exp
#   energy_2 => div
#   energy_sum => sum_1
# Graph fragment:
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%bmm,), kwargs = {})
#   %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused_div_exp_sum_1 = async_compile.triton('triton_per_fused_div_exp_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[64, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_exp_sum_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = tmp1 / tmp5
    tl.store(out_ptr1 + (r1 + (16*x0)), tmp6, xmask)
    tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_6/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
# Graph fragment:
#   %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (1, ), (1, ))
    assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_0.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 0, 1), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [energy_1, energy_sum, energy_2], Original ATen: [aten.exp, aten.sum, aten.div]
        triton_per_fused_div_exp_sum_1.run(buf4, buf5, buf6, 64, 16, grid=grid(64), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf7 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf8 = buf7; del buf7  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_2.run(buf8, primals_7, 256, grid=grid(256), stream=stream0)
        del primals_7
        buf9 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf8, (4, 4, 16), (64, 16, 1), 0), buf6, out=buf9)
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
        buf11 = buf10; del buf10  # reuse
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_2.run(buf11, primals_9, 256, grid=grid(256), stream=stream0)
        del primals_9
    return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, buf4, buf5, buf6, reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf8, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 16), 0), )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn


class NormalAttention_embedded_gaussian(nn.Module):

    def __init__(self, input_channel_num, k=4):
        super(NormalAttention_embedded_gaussian, self).__init__()
        self.c_in = input_channel_num
        self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)
        self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)

    def forward(self, x):
        B, C, H, W = x.size()
        proj_query = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1)
        proj_key = self.key_conv(x).view(B, -1, H * W)
        energy = torch.bmm(proj_query, proj_key)
        energy = torch.exp(energy)
        energy_sum = torch.sum(energy, dim=2, keepdim=True)
        energy = energy / energy_sum
        proj_value = self.value_conv(x).view(B, -1, H * W)
        out = torch.bmm(proj_value, energy).view(B, C, H, W)
        out = self.gamma(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channel_num': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_per_fused_div_exp_sum_1(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = tmp1 / tmp5
    tl.store(out_ptr1 + (r1 + 16 * x0), tmp6, xmask)
    tl.store(out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1,), (1,))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (1,), (1,))
    assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
        del buf1
        triton_poi_fused_convolution_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf3, (4, 1, 16), (16, 0, 1), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
        triton_per_fused_div_exp_sum_1[grid(64)](buf4, buf5, buf6, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
        buf7 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
        buf8 = buf7
        del buf7
        triton_poi_fused_convolution_2[grid(256)](buf8, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf9 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf8, (4, 4, 16), (64, 16, 1), 0), buf6, out=buf9)
        buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
        buf11 = buf10
        del buf10
        triton_poi_fused_convolution_2[grid(256)](buf11, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
    return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8,
        buf4, buf5, buf6, reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(buf8, (4, 16, 4), (64, 1, 16), 0),
        reinterpret_tensor(buf2, (4, 1, 16), (16, 16, 1), 0),
        reinterpret_tensor(buf3, (4, 16, 1), (16, 1, 16), 0))


class NormalAttention_embedded_gaussianNew(nn.Module):

    def __init__(self, input_channel_num, k=4):
        super(NormalAttention_embedded_gaussianNew, self).__init__()
        self.c_in = input_channel_num
        self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in // k, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)
        self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.c_in, kernel_size=1)

    def forward(self, input_0):
        primals_2 = self.query_conv.weight
        primals_3 = self.query_conv.bias
        primals_4 = self.key_conv.weight
        primals_5 = self.key_conv.bias
        primals_6 = self.value_conv.weight
        primals_7 = self.value_conv.bias
        primals_8 = self.gamma.weight
        primals_9 = self.gamma.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Schwartz-Zha/My-invertible-resnet
NormalAttention_embedded_gaussian
false
1039
[ "MIT" ]
0
5415975bb0d640f3bf3ef4a7b986563e84109270
https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_channel_num, k=4): super().__init__() self.c_in = input_channel_num self.query_conv = nn.Conv2d(in_channels=self.c_in, out_channels= self.c_in // k, kernel_size=1) self.key_conv = nn.Conv2d(in_channels=self.c_in, out_channels=self. c_in // k, kernel_size=1) self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels= self.c_in, kernel_size=1) self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self. c_in, kernel_size=1) def forward(self, x): B, C, H, W = x.size() proj_query = self.query_conv(x).view(B, -1, H * W).permute(0, 2, 1) proj_key = self.key_conv(x).view(B, -1, H * W) energy = torch.bmm(proj_query, proj_key) energy = torch.exp(energy) energy_sum = torch.sum(energy, dim=2, keepdim=True) energy = energy / energy_sum proj_value = self.value_conv(x).view(B, -1, H * W) out = torch.bmm(proj_value, energy).view(B, C, H, W) out = self.gamma(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
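The exp/sum normalization in Model.forward is mathematically a softmax over the key dimension; softmax merely subtracts the row maximum first for numerical stability, and that shift cancels in the ratio. A quick standalone check:

import torch

e = torch.randn(4, 16, 16)
manual = torch.exp(e) / torch.exp(e).sum(dim=2, keepdim=True)
assert torch.allclose(manual, torch.softmax(e, dim=2), atol=1e-6)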
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/hr/chrb4easzia4dw753qymwrkwdvp5554y3k2pc6zt3dtjbxgoihzj.py # Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # pi1 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (128, 4), (4, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (1, 128), (128, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [v1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf4, primals_7, 8192, grid=grid(8192), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [values], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf4, primals_8, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
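The one fused kernel in this record, triton_poi_fused_tanh_0, adds the layer bias (broadcast over the trailing 128-wide feature dimension via x0 = xindex % 128) and applies tanh in place. An eager equivalent of what it writes back (toy tensors on CPU for illustration):

import torch

buf = torch.randn(4, 4, 4, 128)   # matmul output reinterpreted to (4, 4, 4, 128)
bias = torch.randn(128)
fused = torch.tanh(buf + bias)    # the value the kernel stores back into buf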
import torch import torch.nn as nn import torch.nn.functional as F def set_init(layers): for layer in layers: nn.init.normal_(layer.weight, mean=0.0, std=0.1) nn.init.constant_(layer.bias, 0.0) class Net(nn.Module): def __init__(self, s_dim, a_dim): super(Net, self).__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, 128) self.pi2 = nn.Linear(128, a_dim) self.v1 = nn.Linear(s_dim, 128) self.v2 = nn.Linear(128, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = torch.distributions.Categorical def forward(self, x): pi1 = torch.tanh(self.pi1(x)) logits = self.pi2(pi1) v1 = torch.tanh(self.v1(x)) values = self.v2(v1) return logits, values def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v total_loss = (c_loss + a_loss).mean() return total_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'s_dim': 4, 'a_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (128, 4), (4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (1, 128), (128, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf3 triton_poi_fused_tanh_0[grid(8192)](buf4, primals_7, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf4, primals_8, primals_4 def set_init(layers): for layer in layers: nn.init.normal_(layer.weight, mean=0.0, std=0.1) nn.init.constant_(layer.bias, 0.0) class NetNew(nn.Module): def __init__(self, s_dim, a_dim): super(NetNew, self).__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, 128) self.pi2 = nn.Linear(128, a_dim) self.v1 = nn.Linear(s_dim, 128) self.v2 = nn.Linear(128, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = 
torch.distributions.Categorical def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v total_loss = (c_loss + a_loss).mean() return total_loss def forward(self, input_0): primals_1 = self.pi1.weight primals_2 = self.pi1.bias primals_4 = self.pi2.weight primals_5 = self.pi2.bias primals_6 = self.v1.weight primals_7 = self.v1.bias primals_8 = self.v2.weight primals_9 = self.v2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
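In call, each nn.Linear is lowered to mm/addmm, and the (1, 128)-strided reinterpret of the (4, 128) weight buffer is a zero-copy transpose. A small check that addmm on the transposed weight matches F.linear (tensor names are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(64, 128)
w = torch.randn(4, 128)   # nn.Linear stores weight as (out_features, in_features)
b = torch.randn(4)

assert torch.allclose(torch.addmm(b, x, w.t()), F.linear(x, w, b), atol=1e-5)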
SeungyounShin/pytorch-A3C
Net
false
1040
[ "MIT" ]
0
acb9c05a5e1a697c48a7d4c1a48b1c86326faf91
https://github.com/SeungyounShin/pytorch-A3C/tree/acb9c05a5e1a697c48a7d4c1a48b1c86326faf91
import torch import torch.nn as nn import torch.nn.functional as F def set_init(layers): for layer in layers: nn.init.normal_(layer.weight, mean=0.0, std=0.1) nn.init.constant_(layer.bias, 0.0) class Model(nn.Module): def __init__(self, s_dim, a_dim): super().__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, 128) self.pi2 = nn.Linear(128, a_dim) self.v1 = nn.Linear(s_dim, 128) self.v2 = nn.Linear(128, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = torch.distributions.Categorical def forward(self, x): pi1 = torch.tanh(self.pi1(x)) logits = self.pi2(pi1) v1 = torch.tanh(self.v1(x)) values = self.v2(v1) return logits, values def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v total_loss = (c_loss + a_loss).mean() return total_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
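choose_action turns logits into probabilities and samples an action index from a Categorical distribution; the sampling step in isolation, with toy logits:

import torch

logits = torch.tensor([[2.0, 0.5, -1.0, 0.0]])
probs = torch.softmax(logits, dim=1)
action = torch.distributions.Categorical(probs).sample()  # shape (1,)
print(int(action))  # an index in [0, 4); 0 is the most likely draw here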
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py # Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax] # Source node to ATen node mapping: # pred => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py # Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax] # Source node to ATen node mapping: # pred => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/uq/cuqmt6h2h2qrwlfougoxbc4cnmi6o42afntw3nwkdzak2kpz2xum.py # Topologically Sorted Source Nodes: [mul, ne, valid_mask, valid_mask_1, mul_1, sum_1, mul_2, num, pow_1, pow_2, add_1, sum_2, den, truediv, loss, loss_1, total_loss, mul_3, valid_mask_2, 
mul_4, sum_3, mul_5, num_1, pow_3, pow_4, add_5, sum_4, den_1, truediv_1, loss_2, loss_3, total_loss_1, mul_6, valid_mask_3, mul_7, sum_5, mul_8, num_2, pow_5, pow_6, add_8, sum_6, den_2, truediv_2, loss_4, loss_5, total_loss_2, mul_9, valid_mask_4, mul_10, sum_7, mul_11, num_3, pow_7, pow_8, add_11, sum_8, den_3, truediv_3, loss_6, loss_7, total_loss_3, loss_8, loss_9, loss_10], Original ATen: [aten.mul, aten.ne, aten._to_copy, aten.view, aten.sum, aten.add, aten.pow, aten.div, aten.rsub, aten.mean] # Source node to ATen node mapping: # add_1 => add_1 # add_11 => add_13 # add_5 => add_5 # add_8 => add_9 # den => add_2 # den_1 => add_6 # den_2 => add_10 # den_3 => add_14 # loss => sub_1 # loss_1 => mean # loss_10 => mul_12 # loss_2 => sub_2 # loss_3 => mean_1 # loss_4 => sub_3 # loss_5 => mean_2 # loss_6 => sub_4 # loss_7 => mean_3 # loss_8 => div_5 # loss_9 => mean_4 # mul => mul # mul_1 => mul_1 # mul_10 => mul_10 # mul_11 => mul_11 # mul_2 => mul_2 # mul_3 => mul_3 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_8 => mul_8 # mul_9 => mul_9 # ne => ne # num => add # num_1 => add_4 # num_2 => add_8 # num_3 => add_12 # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # pow_4 => pow_4 # pow_5 => pow_5 # pow_6 => pow_6 # pow_7 => pow_7 # pow_8 => pow_8 # sum_1 => sum_2 # sum_2 => sum_3 # sum_3 => sum_4 # sum_4 => sum_5 # sum_5 => sum_6 # sum_6 => sum_7 # sum_7 => sum_8 # sum_8 => sum_9 # total_loss => add_3 # total_loss_1 => add_7 # total_loss_2 => add_11 # total_loss_3 => add_15 # truediv => div_1 # truediv_1 => div_2 # truediv_2 => div_3 # truediv_3 => div_4 # valid_mask => convert_element_type_2 # valid_mask_1 => view_2 # valid_mask_2 => view_5 # valid_mask_3 => view_8 # valid_mask_4 => view_11 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %select_1), kwargs = {}) # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, 255), kwargs = {}) # %convert_element_type_2 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int64), kwargs = {}) # %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) # %mean : [num_users=1] = 
call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %select_3), kwargs = {}) # %view_5 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_5), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_5, [1]), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mean_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %select_5), kwargs = {}) # %view_8 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %view_8), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [1]), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 2), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, 1), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_6, 2), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_5, 2), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_9, [1]), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_7, 1), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_3), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) # %add_11 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mean_2), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %select_7), kwargs = {}) # %view_11 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %view_11), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [1]), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 2), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, 1), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_9, 2), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_7, 2), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_7, %pow_8), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_13, [1]), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, 1), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_4,), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mean_3), kwargs = {}) # %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {}) # %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_5,), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_4, 1.0), kwargs = {}) triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2 = async_compile.triton('triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp153 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tl.full([1, 1], 3, tl.int64) tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp6 == tmp3 tmp8 = tmp7.to(tl.int64) tmp9 = tmp8.to(tl.float32) tmp10 = tmp0 * tmp9 tmp11 = 255.0 tmp12 = tmp1 != tmp11 tmp13 = tmp12.to(tl.int64) tmp14 = tmp13.to(tl.float32) tmp15 = tmp10 * tmp14 tmp17 = tmp16.to(tl.int64) tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp20 = tmp19 == tmp3 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21.to(tl.float32) tmp23 = tmp0 * tmp22 tmp24 = tmp16 != tmp11 tmp25 = tmp24.to(tl.int64) tmp26 = tmp25.to(tl.float32) tmp27 = tmp23 * tmp26 tmp28 = tmp15 + tmp27 tmp30 = tmp29.to(tl.int64) tmp31 = triton_helpers.maximum(tmp30, tmp3) tmp32 = triton_helpers.minimum(tmp31, tmp5) tmp33 = tmp32 == tmp3 tmp34 = tmp33.to(tl.int64) tmp35 = tmp34.to(tl.float32) tmp36 = tmp0 * tmp35 tmp37 = tmp29 != tmp11 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38.to(tl.float32) tmp40 = tmp36 * tmp39 tmp41 = tmp28 + tmp40 tmp43 = tmp42.to(tl.int64) tmp44 = triton_helpers.maximum(tmp43, tmp3) tmp45 = triton_helpers.minimum(tmp44, tmp5) tmp46 = tmp45 == tmp3 tmp47 = tmp46.to(tl.int64) tmp48 = tmp47.to(tl.float32) tmp49 = tmp0 * tmp48 tmp50 = tmp42 != tmp11 tmp51 = tmp50.to(tl.int64) tmp52 = tmp51.to(tl.float32) tmp53 = tmp49 * tmp52 tmp54 = tmp41 + tmp53 tmp55 = tmp0 * tmp0 tmp56 = tmp8 * tmp8 tmp57 = tmp56.to(tl.float32) tmp58 = tmp55 + tmp57 tmp59 = tmp21 * tmp21 tmp60 = tmp59.to(tl.float32) tmp61 = tmp55 + tmp60 tmp62 = tmp58 + tmp61 tmp63 = tmp34 * tmp34 tmp64 = tmp63.to(tl.float32) tmp65 = tmp55 + tmp64 tmp66 = tmp62 + tmp65 tmp67 = tmp47 * tmp47 tmp68 = tmp67.to(tl.float32) tmp69 = tmp55 + tmp68 tmp70 = tmp66 + tmp69 tmp72 = tl.full([1, 1], 1, tl.int64) tmp73 = tmp6 == tmp72 tmp74 = tmp73.to(tl.int64) tmp75 = tmp74.to(tl.float32) tmp76 = tmp71 * tmp75 tmp77 = tmp76 * tmp14 tmp78 = tmp19 == tmp72 tmp79 = tmp78.to(tl.int64) tmp80 = tmp79.to(tl.float32) tmp81 = tmp71 * tmp80 tmp82 = tmp81 * tmp26 tmp83 = tmp77 + tmp82 tmp84 = tmp32 == tmp72 tmp85 = tmp84.to(tl.int64) tmp86 = tmp85.to(tl.float32) tmp87 = tmp71 * tmp86 tmp88 = tmp87 * tmp39 tmp89 = tmp83 + tmp88 tmp90 = tmp45 == tmp72 tmp91 = 
tmp90.to(tl.int64) tmp92 = tmp91.to(tl.float32) tmp93 = tmp71 * tmp92 tmp94 = tmp93 * tmp52 tmp95 = tmp89 + tmp94 tmp96 = tmp71 * tmp71 tmp97 = tmp74 * tmp74 tmp98 = tmp97.to(tl.float32) tmp99 = tmp96 + tmp98 tmp100 = tmp79 * tmp79 tmp101 = tmp100.to(tl.float32) tmp102 = tmp96 + tmp101 tmp103 = tmp99 + tmp102 tmp104 = tmp85 * tmp85 tmp105 = tmp104.to(tl.float32) tmp106 = tmp96 + tmp105 tmp107 = tmp103 + tmp106 tmp108 = tmp91 * tmp91 tmp109 = tmp108.to(tl.float32) tmp110 = tmp96 + tmp109 tmp111 = tmp107 + tmp110 tmp113 = tl.full([1, 1], 2, tl.int64) tmp114 = tmp6 == tmp113 tmp115 = tmp114.to(tl.int64) tmp116 = tmp115.to(tl.float32) tmp117 = tmp112 * tmp116 tmp118 = tmp117 * tmp14 tmp119 = tmp19 == tmp113 tmp120 = tmp119.to(tl.int64) tmp121 = tmp120.to(tl.float32) tmp122 = tmp112 * tmp121 tmp123 = tmp122 * tmp26 tmp124 = tmp118 + tmp123 tmp125 = tmp32 == tmp113 tmp126 = tmp125.to(tl.int64) tmp127 = tmp126.to(tl.float32) tmp128 = tmp112 * tmp127 tmp129 = tmp128 * tmp39 tmp130 = tmp124 + tmp129 tmp131 = tmp45 == tmp113 tmp132 = tmp131.to(tl.int64) tmp133 = tmp132.to(tl.float32) tmp134 = tmp112 * tmp133 tmp135 = tmp134 * tmp52 tmp136 = tmp130 + tmp135 tmp137 = tmp112 * tmp112 tmp138 = tmp115 * tmp115 tmp139 = tmp138.to(tl.float32) tmp140 = tmp137 + tmp139 tmp141 = tmp120 * tmp120 tmp142 = tmp141.to(tl.float32) tmp143 = tmp137 + tmp142 tmp144 = tmp140 + tmp143 tmp145 = tmp126 * tmp126 tmp146 = tmp145.to(tl.float32) tmp147 = tmp137 + tmp146 tmp148 = tmp144 + tmp147 tmp149 = tmp132 * tmp132 tmp150 = tmp149.to(tl.float32) tmp151 = tmp137 + tmp150 tmp152 = tmp148 + tmp151 tmp154 = tmp6 == tmp5 tmp155 = tmp154.to(tl.int64) tmp156 = tmp155.to(tl.float32) tmp157 = tmp153 * tmp156 tmp158 = tmp157 * tmp14 tmp159 = tmp19 == tmp5 tmp160 = tmp159.to(tl.int64) tmp161 = tmp160.to(tl.float32) tmp162 = tmp153 * tmp161 tmp163 = tmp162 * tmp26 tmp164 = tmp158 + tmp163 tmp165 = tmp32 == tmp5 tmp166 = tmp165.to(tl.int64) tmp167 = tmp166.to(tl.float32) tmp168 = tmp153 * tmp167 tmp169 = tmp168 * tmp39 tmp170 = tmp164 + tmp169 tmp171 = tmp45 == tmp5 tmp172 = tmp171.to(tl.int64) tmp173 = tmp172.to(tl.float32) tmp174 = tmp153 * tmp173 tmp175 = tmp174 * tmp52 tmp176 = tmp170 + tmp175 tmp177 = tmp153 * tmp153 tmp178 = tmp155 * tmp155 tmp179 = tmp178.to(tl.float32) tmp180 = tmp177 + tmp179 tmp181 = tmp160 * tmp160 tmp182 = tmp181.to(tl.float32) tmp183 = tmp177 + tmp182 tmp184 = tmp180 + tmp183 tmp185 = tmp166 * tmp166 tmp186 = tmp185.to(tl.float32) tmp187 = tmp177 + tmp186 tmp188 = tmp184 + tmp187 tmp189 = tmp172 * tmp172 tmp190 = tmp189.to(tl.float32) tmp191 = tmp177 + tmp190 tmp192 = tmp188 + tmp191 tmp193 = 2.0 tmp194 = tmp54 * tmp193 tmp195 = 1.0 tmp196 = tmp194 + tmp195 tmp197 = tmp70 + tmp195 tmp198 = tmp196 / tmp197 tmp199 = tmp195 - tmp198 tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK]) tmp202 = tl.sum(tmp200, 1)[:, None] tmp203 = tmp95 * tmp193 tmp204 = tmp203 + tmp195 tmp205 = tmp111 + tmp195 tmp206 = tmp204 / tmp205 tmp207 = tmp195 - tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = tl.sum(tmp208, 1)[:, None] tmp211 = tmp136 * tmp193 tmp212 = tmp211 + tmp195 tmp213 = tmp152 + tmp195 tmp214 = tmp212 / tmp213 tmp215 = tmp195 - tmp214 tmp216 = tl.broadcast_to(tmp215, [XBLOCK, RBLOCK]) tmp218 = tl.sum(tmp216, 1)[:, None] tmp219 = tmp176 * tmp193 tmp220 = tmp219 + tmp195 tmp221 = tmp192 + tmp195 tmp222 = tmp220 / tmp221 tmp223 = tmp195 - tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = tl.sum(tmp224, 1)[:, None] tmp227 = 4.0 tmp228 = tmp202 / tmp227 tmp229 = 0.0 tmp230 = tmp228 + 
tmp229 tmp231 = tmp210 / tmp227 tmp232 = tmp230 + tmp231 tmp233 = tmp218 / tmp227 tmp234 = tmp232 + tmp233 tmp235 = tmp226 / tmp227 tmp236 = tmp234 + tmp235 tmp237 = 0.25 tmp238 = tmp236 * tmp237 tmp239 = tmp238 / tmp195 tmp240 = tmp239 * tmp195 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp240, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) del buf0 buf10 = empty_strided_cuda((), (), torch.float32) buf14 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [mul, ne, valid_mask, valid_mask_1, mul_1, sum_1, mul_2, num, pow_1, pow_2, add_1, sum_2, den, truediv, loss, loss_1, total_loss, mul_3, valid_mask_2, mul_4, sum_3, mul_5, num_1, pow_3, pow_4, add_5, sum_4, den_1, truediv_1, loss_2, loss_3, total_loss_1, mul_6, valid_mask_3, mul_7, sum_5, mul_8, num_2, pow_5, pow_6, add_8, sum_6, den_2, truediv_2, loss_4, loss_5, total_loss_2, mul_9, valid_mask_4, mul_10, sum_7, mul_11, num_3, pow_7, pow_8, add_11, sum_8, den_3, truediv_3, loss_6, loss_7, total_loss_3, loss_8, loss_9, loss_10], Original ATen: [aten.mul, aten.ne, aten._to_copy, aten.view, aten.sum, aten.add, aten.pow, aten.div, aten.rsub, aten.mean] triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2.run(buf14, buf1, arg1_1, 1, 4, grid=grid(1), stream=stream0) del arg1_1 del buf1 return (buf14, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools import torch import numpy as np import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): assert pred.shape[0] == target.shape[0] pred = pred.reshape(pred.shape[0], -1) target = target.reshape(target.shape[0], -1) valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth return 1 - num / den @weighted_loss def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight= None, ignore_index=255): assert pred.shape[0] == target.shape[0] total_loss = 0 num_classes = pred.shape[1] for i in range(num_classes): if i != ignore_index: dice_loss = binary_dice_loss(pred[:, i], target[..., i], valid_mask=valid_mask, smooth=smooth, exponent=exponent) if class_weight is not None: dice_loss *= class_weight[i] total_loss += dice_loss return total_loss / num_classes def get_class_weight(class_weight): """Get class weight for loss function. Args: class_weight (list[float] | str | None): If class_weight is a str, take it as a file name and read from it. 
""" if isinstance(class_weight, str): if class_weight.endswith('.npy'): class_weight = np.load(class_weight) else: class_weight = mmcv.load(class_weight) return class_weight class DiceLoss(nn.Module): """DiceLoss. This loss is proposed in `V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_. Args: loss_type (str, optional): Binary or multi-class loss. Default: 'multi_class'. Options are "binary" and "multi_class". smooth (float): A float number to smooth loss, and avoid NaN error. Default: 1 exponent (float): An float number to calculate denominator value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". This parameter only works when per_image is True. Default: 'mean'. class_weight (list[float] | str, optional): Weight of each class. If in str format, read them from a file. Defaults to None. loss_weight (float, optional): Weight of the loss. Default to 1.0. ignore_index (int | None): The label index to be ignored. Default: 255. """ def __init__(self, smooth=1, exponent=2, reduction='mean', class_weight =None, loss_weight=1.0, ignore_index=255, **kwards): super(DiceLoss, self).__init__() self.smooth = smooth self.exponent = exponent self.reduction = reduction self.class_weight = get_class_weight(class_weight) self.loss_weight = loss_weight self.ignore_index = ignore_index def forward(self, pred, target, avg_factor=None, reduction_override= None, **kwards): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) if self.class_weight is not None: class_weight = pred.new_tensor(self.class_weight) else: class_weight = None pred = F.softmax(pred, dim=1) num_classes = pred.shape[1] one_hot_target = F.one_hot(torch.clamp(target.long(), 0, num_classes - 1), num_classes=num_classes) valid_mask = (target != self.ignore_index).long() loss = self.loss_weight * dice_loss(pred, one_hot_target, valid_mask=valid_mask, reduction=reduction, avg_factor= avg_factor, smooth=self.smooth, exponent=self.exponent, class_weight=class_weight, ignore_index=self.ignore_index) return loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import numpy as np import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last' ) tmp153 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last' ) tmp2 = tmp1.to(tl.int64) tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tl.full([1, 1], 3, tl.int64) tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = tmp6 == tmp3 tmp8 = tmp7.to(tl.int64) tmp9 = tmp8.to(tl.float32) tmp10 = tmp0 * tmp9 tmp11 = 255.0 tmp12 = tmp1 != tmp11 tmp13 = tmp12.to(tl.int64) tmp14 = tmp13.to(tl.float32) tmp15 = tmp10 * tmp14 tmp17 = tmp16.to(tl.int64) tmp18 = triton_helpers.maximum(tmp17, tmp3) tmp19 = triton_helpers.minimum(tmp18, tmp5) tmp20 = tmp19 == tmp3 tmp21 = 
tmp20.to(tl.int64) tmp22 = tmp21.to(tl.float32) tmp23 = tmp0 * tmp22 tmp24 = tmp16 != tmp11 tmp25 = tmp24.to(tl.int64) tmp26 = tmp25.to(tl.float32) tmp27 = tmp23 * tmp26 tmp28 = tmp15 + tmp27 tmp30 = tmp29.to(tl.int64) tmp31 = triton_helpers.maximum(tmp30, tmp3) tmp32 = triton_helpers.minimum(tmp31, tmp5) tmp33 = tmp32 == tmp3 tmp34 = tmp33.to(tl.int64) tmp35 = tmp34.to(tl.float32) tmp36 = tmp0 * tmp35 tmp37 = tmp29 != tmp11 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38.to(tl.float32) tmp40 = tmp36 * tmp39 tmp41 = tmp28 + tmp40 tmp43 = tmp42.to(tl.int64) tmp44 = triton_helpers.maximum(tmp43, tmp3) tmp45 = triton_helpers.minimum(tmp44, tmp5) tmp46 = tmp45 == tmp3 tmp47 = tmp46.to(tl.int64) tmp48 = tmp47.to(tl.float32) tmp49 = tmp0 * tmp48 tmp50 = tmp42 != tmp11 tmp51 = tmp50.to(tl.int64) tmp52 = tmp51.to(tl.float32) tmp53 = tmp49 * tmp52 tmp54 = tmp41 + tmp53 tmp55 = tmp0 * tmp0 tmp56 = tmp8 * tmp8 tmp57 = tmp56.to(tl.float32) tmp58 = tmp55 + tmp57 tmp59 = tmp21 * tmp21 tmp60 = tmp59.to(tl.float32) tmp61 = tmp55 + tmp60 tmp62 = tmp58 + tmp61 tmp63 = tmp34 * tmp34 tmp64 = tmp63.to(tl.float32) tmp65 = tmp55 + tmp64 tmp66 = tmp62 + tmp65 tmp67 = tmp47 * tmp47 tmp68 = tmp67.to(tl.float32) tmp69 = tmp55 + tmp68 tmp70 = tmp66 + tmp69 tmp72 = tl.full([1, 1], 1, tl.int64) tmp73 = tmp6 == tmp72 tmp74 = tmp73.to(tl.int64) tmp75 = tmp74.to(tl.float32) tmp76 = tmp71 * tmp75 tmp77 = tmp76 * tmp14 tmp78 = tmp19 == tmp72 tmp79 = tmp78.to(tl.int64) tmp80 = tmp79.to(tl.float32) tmp81 = tmp71 * tmp80 tmp82 = tmp81 * tmp26 tmp83 = tmp77 + tmp82 tmp84 = tmp32 == tmp72 tmp85 = tmp84.to(tl.int64) tmp86 = tmp85.to(tl.float32) tmp87 = tmp71 * tmp86 tmp88 = tmp87 * tmp39 tmp89 = tmp83 + tmp88 tmp90 = tmp45 == tmp72 tmp91 = tmp90.to(tl.int64) tmp92 = tmp91.to(tl.float32) tmp93 = tmp71 * tmp92 tmp94 = tmp93 * tmp52 tmp95 = tmp89 + tmp94 tmp96 = tmp71 * tmp71 tmp97 = tmp74 * tmp74 tmp98 = tmp97.to(tl.float32) tmp99 = tmp96 + tmp98 tmp100 = tmp79 * tmp79 tmp101 = tmp100.to(tl.float32) tmp102 = tmp96 + tmp101 tmp103 = tmp99 + tmp102 tmp104 = tmp85 * tmp85 tmp105 = tmp104.to(tl.float32) tmp106 = tmp96 + tmp105 tmp107 = tmp103 + tmp106 tmp108 = tmp91 * tmp91 tmp109 = tmp108.to(tl.float32) tmp110 = tmp96 + tmp109 tmp111 = tmp107 + tmp110 tmp113 = tl.full([1, 1], 2, tl.int64) tmp114 = tmp6 == tmp113 tmp115 = tmp114.to(tl.int64) tmp116 = tmp115.to(tl.float32) tmp117 = tmp112 * tmp116 tmp118 = tmp117 * tmp14 tmp119 = tmp19 == tmp113 tmp120 = tmp119.to(tl.int64) tmp121 = tmp120.to(tl.float32) tmp122 = tmp112 * tmp121 tmp123 = tmp122 * tmp26 tmp124 = tmp118 + tmp123 tmp125 = tmp32 == tmp113 tmp126 = tmp125.to(tl.int64) tmp127 = tmp126.to(tl.float32) tmp128 = tmp112 * tmp127 tmp129 = tmp128 * tmp39 tmp130 = tmp124 + tmp129 tmp131 = tmp45 == tmp113 tmp132 = tmp131.to(tl.int64) tmp133 = tmp132.to(tl.float32) tmp134 = tmp112 * tmp133 tmp135 = tmp134 * tmp52 tmp136 = tmp130 + tmp135 tmp137 = tmp112 * tmp112 tmp138 = tmp115 * tmp115 tmp139 = tmp138.to(tl.float32) tmp140 = tmp137 + tmp139 tmp141 = tmp120 * tmp120 tmp142 = tmp141.to(tl.float32) tmp143 = tmp137 + tmp142 tmp144 = tmp140 + tmp143 tmp145 = tmp126 * tmp126 tmp146 = tmp145.to(tl.float32) tmp147 = tmp137 + tmp146 tmp148 = tmp144 + tmp147 tmp149 = tmp132 * tmp132 tmp150 = tmp149.to(tl.float32) tmp151 = tmp137 + tmp150 tmp152 = tmp148 + tmp151 tmp154 = tmp6 == tmp5 tmp155 = tmp154.to(tl.int64) tmp156 = tmp155.to(tl.float32) tmp157 = tmp153 * tmp156 tmp158 = tmp157 * tmp14 tmp159 = tmp19 == tmp5 tmp160 = tmp159.to(tl.int64) tmp161 = tmp160.to(tl.float32) tmp162 = tmp153 * tmp161 
tmp163 = tmp162 * tmp26 tmp164 = tmp158 + tmp163 tmp165 = tmp32 == tmp5 tmp166 = tmp165.to(tl.int64) tmp167 = tmp166.to(tl.float32) tmp168 = tmp153 * tmp167 tmp169 = tmp168 * tmp39 tmp170 = tmp164 + tmp169 tmp171 = tmp45 == tmp5 tmp172 = tmp171.to(tl.int64) tmp173 = tmp172.to(tl.float32) tmp174 = tmp153 * tmp173 tmp175 = tmp174 * tmp52 tmp176 = tmp170 + tmp175 tmp177 = tmp153 * tmp153 tmp178 = tmp155 * tmp155 tmp179 = tmp178.to(tl.float32) tmp180 = tmp177 + tmp179 tmp181 = tmp160 * tmp160 tmp182 = tmp181.to(tl.float32) tmp183 = tmp177 + tmp182 tmp184 = tmp180 + tmp183 tmp185 = tmp166 * tmp166 tmp186 = tmp185.to(tl.float32) tmp187 = tmp177 + tmp186 tmp188 = tmp184 + tmp187 tmp189 = tmp172 * tmp172 tmp190 = tmp189.to(tl.float32) tmp191 = tmp177 + tmp190 tmp192 = tmp188 + tmp191 tmp193 = 2.0 tmp194 = tmp54 * tmp193 tmp195 = 1.0 tmp196 = tmp194 + tmp195 tmp197 = tmp70 + tmp195 tmp198 = tmp196 / tmp197 tmp199 = tmp195 - tmp198 tmp200 = tl.broadcast_to(tmp199, [XBLOCK, RBLOCK]) tmp202 = tl.sum(tmp200, 1)[:, None] tmp203 = tmp95 * tmp193 tmp204 = tmp203 + tmp195 tmp205 = tmp111 + tmp195 tmp206 = tmp204 / tmp205 tmp207 = tmp195 - tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = tl.sum(tmp208, 1)[:, None] tmp211 = tmp136 * tmp193 tmp212 = tmp211 + tmp195 tmp213 = tmp152 + tmp195 tmp214 = tmp212 / tmp213 tmp215 = tmp195 - tmp214 tmp216 = tl.broadcast_to(tmp215, [XBLOCK, RBLOCK]) tmp218 = tl.sum(tmp216, 1)[:, None] tmp219 = tmp176 * tmp193 tmp220 = tmp219 + tmp195 tmp221 = tmp192 + tmp195 tmp222 = tmp220 / tmp221 tmp223 = tmp195 - tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = tl.sum(tmp224, 1)[:, None] tmp227 = 4.0 tmp228 = tmp202 / tmp227 tmp229 = 0.0 tmp230 = tmp228 + tmp229 tmp231 = tmp210 / tmp227 tmp232 = tmp230 + tmp231 tmp233 = tmp218 / tmp227 tmp234 = tmp232 + tmp233 tmp235 = tmp226 / tmp227 tmp236 = tmp234 + tmp235 tmp237 = 0.25 tmp238 = tmp236 * tmp237 tmp239 = tmp238 / tmp195 tmp240 = tmp239 * tmp195 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp240, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 buf10 = empty_strided_cuda((), (), torch.float32) buf14 = buf10 del buf10 triton_per_fused__to_copy_add_div_mean_mul_ne_pow_rsub_sum_view_2[grid (1)](buf14, buf1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1 ) del arg1_1 del buf1 return buf14, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. 
Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): assert pred.shape[0] == target.shape[0] pred = pred.reshape(pred.shape[0], -1) target = target.reshape(target.shape[0], -1) valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth return 1 - num / den @weighted_loss def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight= None, ignore_index=255): assert pred.shape[0] == target.shape[0] total_loss = 0 num_classes = pred.shape[1] for i in range(num_classes): if i != ignore_index: dice_loss = binary_dice_loss(pred[:, i], target[..., i], valid_mask=valid_mask, smooth=smooth, exponent=exponent) if class_weight is not None: dice_loss *= class_weight[i] total_loss += dice_loss return total_loss / num_classes def get_class_weight(class_weight): """Get class weight for loss function. Args: class_weight (list[float] | str | None): If class_weight is a str, take it as a file name and read from it. """ if isinstance(class_weight, str): if class_weight.endswith('.npy'): class_weight = np.load(class_weight) else: class_weight = mmcv.load(class_weight) return class_weight class DiceLossNew(nn.Module): """DiceLoss. This loss is proposed in `V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_. Args: loss_type (str, optional): Binary or multi-class loss. Default: 'multi_class'. Options are "binary" and "multi_class". smooth (float): A float number to smooth loss, and avoid NaN error. Default: 1 exponent (float): An float number to calculate denominator value: \\sum{x^exponent} + \\sum{y^exponent}. Default: 2. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". This parameter only works when per_image is True. Default: 'mean'. 
class_weight (list[float] | str, optional): Weight of each class. If in str format, read them from a file. Defaults to None. loss_weight (float, optional): Weight of the loss. Default to 1.0. ignore_index (int | None): The label index to be ignored. Default: 255. """ def __init__(self, smooth=1, exponent=2, reduction='mean', class_weight =None, loss_weight=1.0, ignore_index=255, **kwards): super(DiceLossNew, self).__init__() self.smooth = smooth self.exponent = exponent self.reduction = reduction self.class_weight = get_class_weight(class_weight) self.loss_weight = loss_weight self.ignore_index = ignore_index def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
SeHwanJoo/mmsegmentation_body
DiceLoss
false
1041
[ "Apache-2.0" ]
0
31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
https://github.com/SeHwanJoo/mmsegmentation_body/tree/31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
import functools import torch import numpy as np import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): assert pred.shape[0] == target.shape[0] pred = pred.reshape(pred.shape[0], -1) target = target.reshape(target.shape[0], -1) valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth return 1 - num / den @weighted_loss def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight= None, ignore_index=255): assert pred.shape[0] == target.shape[0] total_loss = 0 num_classes = pred.shape[1] for i in range(num_classes): if i != ignore_index: dice_loss = binary_dice_loss(pred[:, i], target[..., i], valid_mask=valid_mask, smooth=smooth, exponent=exponent) if class_weight is not None: dice_loss *= class_weight[i] total_loss += dice_loss return total_loss / num_classes def get_class_weight(class_weight): """Get class weight for loss function. Args: class_weight (list[float] | str # ... truncated (>4000 chars) for memory efficiency
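# Editor's note: a minimal, self-contained sanity check (not part of the dataset
# row) for the binary dice formula used in the entry above:
# loss = 1 - (2 * sum(p * t * m) + smooth) / (sum(p^e + t^e) + smooth).
# The tensor values below are illustrative assumptions, not taken from the repository.
import torch

def binary_dice_reference(pred, target, valid_mask, smooth=1, exponent=2):
    # Flatten each sample, then apply the dice formula per sample.
    pred = pred.reshape(pred.shape[0], -1)
    target = target.reshape(target.shape[0], -1)
    valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
    num = torch.sum(pred * target * valid_mask, dim=1) * 2 + smooth
    den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth
    return 1 - num / den

pred = torch.tensor([[0.8, 0.2], [0.5, 0.5]])
target = torch.tensor([[1.0, 0.0], [1.0, 1.0]])
mask = torch.ones_like(target)
print(binary_dice_reference(pred, target, mask))  # per-sample dice loss, shape (2,)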
VAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/3q/c3qwr2d2rrpjzvnddomnmdy6cwva4hjlvrn2y5epemk4ak3k2m6c.py # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] # Source node to ATen node mapping: # h1 => relu # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/a6/ca6vhpnsixur65k3b6hxegp4job3ylimpyatml46dzhhhkxeihd5.py # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # std => exp # z => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, 0.5), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %addmm_1), kwargs = {}) triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hb/chbjjrtszu6f3bhry7ireqcm3ie3twpz5s7g7owb3zuauqhiqcby.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' 
import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400, ), (1, )) assert_size_stride(primals_4, (20, 400), (400, 1)) assert_size_stride(primals_5, (20, ), (1, )) assert_size_stride(primals_6, (20, 400), (400, 1)) assert_size_stride(primals_7, (20, ), (1, )) assert_size_stride(primals_8, (400, 20), (20, 1)) assert_size_stride(primals_9, (400, ), (1, )) assert_size_stride(primals_10, (784, 400), (400, 1)) assert_size_stride(primals_11, (784, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 1600, grid=grid(1600), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf1, 
reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3) del primals_7 # Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like] buf4 = torch.ops.aten.randn.default([4, 20], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add] triton_poi_fused_add_exp_mul_1.run(buf5, buf3, buf2, buf6, 80, grid=grid(80), stream=stream0) buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf7) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf8, primals_9, 1600, grid=grid(1600), stream=stream0) del primals_9 buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf10, primals_11, 3136, grid=grid(3136), stream=stream0) del primals_11 return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((400, 20), (20, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((784, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.autograd class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F import torch.onnx import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp6 = tl.load(in_ptr2 + x0, xmask) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp7 = tmp5 + tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400,), (1,)) assert_size_stride(primals_4, (20, 400), (400, 1)) assert_size_stride(primals_5, (20,), (1,)) assert_size_stride(primals_6, (20, 400), (400, 1)) assert_size_stride(primals_7, (20,), (1,)) assert_size_stride(primals_8, (400, 20), (20, 1)) assert_size_stride(primals_9, (400,), (1,)) assert_size_stride(primals_10, (784, 400), (400, 1)) assert_size_stride(primals_11, (784,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32) extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = torch.ops.aten.randn.default([4, 20], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 20), (20, 1), torch.float32) triton_poi_fused_add_exp_mul_1[grid(80)](buf5, buf3, buf2, buf6, 80, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 400), (400, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_relu_0[grid(1600)](buf8, primals_9, 1600, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf9 = empty_strided_cuda((4, 784), (784, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf9) buf10 = buf9 del buf9 triton_poi_fused_sigmoid_2[grid(3136)](buf10, primals_11, 3136, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 return (buf10, buf2, buf3, primals_1, buf1, buf3, buf5, buf6, buf8, buf10, primals_10, primals_8, primals_6, primals_4) class VAENew(nn.Module): def __init__(self): super(VAENew, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc21.weight primals_5 = self.fc21.bias primals_6 = self.fc22.weight primals_7 = self.fc22.bias primals_8 = self.fc3.weight primals_9 
= self.fc3.bias primals_10 = self.fc4.weight primals_11 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0], output[1], output[2]
ScorpioDoctor/antares02
VAE
false
1042
[ "BSD-3-Clause" ]
0
631b817d2e98f351d1173b620d15c4a5efed11da
https://github.com/ScorpioDoctor/antares02/tree/631b817d2e98f351d1173b620d15c4a5efed11da
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.autograd class Model(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) def encode(self, x): h1 = F.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) def decode(self, z): h3 = F.relu(self.fc3(z)) return torch.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return []
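# Editor's note: a short sketch (not part of the dataset row) of the
# reparameterization step that triton_poi_fused_add_exp_mul_1 fuses in the
# compiled call() above: z = eps * exp(0.5 * logvar) + mu, with eps ~ N(0, I).
# The zero-valued mu/logvar below are illustrative assumptions.
import torch

mu = torch.zeros(4, 20)
logvar = torch.zeros(4, 20)              # std = exp(0.5 * 0) = 1
eps = torch.randn_like(mu)
z = eps * torch.exp(0.5 * logvar) + mu   # same computation as the fused kernel
print(z.shape)                           # torch.Size([4, 20]) -- matches buf6 in call()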
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/yj/cyjay5w6yqtzjhwrra5u36ugjzcr3epsjgaero2ugqpqx7xjycai.py # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_score_2 => exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp3 tmp8 = triton_helpers.maximum(tmp4, tmp7) tmp10 = tmp0 * tmp9 tmp11 = tmp10 * tmp3 tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp0 * tmp13 tmp15 = tmp14 * tmp3 tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp17 = tmp4 - tmp16 tmp18 = tmp17 * tmp3 tmp19 = tl_math.exp(tmp18) tmp20 = tmp7 - tmp16 tmp21 = tmp20 * tmp3 tmp22 = tl_math.exp(tmp21) tmp23 = tmp19 + tmp22 tmp24 = tmp11 - tmp16 tmp25 = tmp24 * tmp3 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp15 - tmp16 tmp29 = tmp28 * tmp3 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/eq/ceqzjquocsdhf6v3jex4eilxuabityntyouuqz2e5hg6cpglxepm.py # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_score_2 => div_1, exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp3 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_query], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_key], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_score_2], Original 
ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf6) buf7 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_11 return (reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def __init__(self): super(ScaledDotProductAttention, self).__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super(MultiHeadAttention, self).__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'model_dim': 4, 'key_dim': 4, 'value_dim': 4, 'num_head': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp0 * tmp5 tmp7 = tmp6 * tmp3 tmp8 = triton_helpers.maximum(tmp4, tmp7) tmp10 = tmp0 * tmp9 tmp11 = tmp10 * tmp3 tmp12 = triton_helpers.maximum(tmp8, tmp11) tmp14 = tmp0 * tmp13 tmp15 = tmp14 * tmp3 tmp16 = triton_helpers.maximum(tmp12, tmp15) tmp17 = tmp4 - tmp16 tmp18 = tmp17 * tmp3 tmp19 = tl_math.exp(tmp18) tmp20 = tmp7 - tmp16 tmp21 = tmp20 * tmp3 tmp22 = tl_math.exp(tmp21) tmp23 = tmp19 + tmp22 tmp24 = tmp11 - tmp16 tmp25 = tmp24 * tmp3 tmp26 = tl_math.exp(tmp25) tmp27 = tmp23 + tmp26 tmp28 = tmp15 - tmp16 tmp29 = tmp28 * tmp3 tmp30 = tl_math.exp(tmp29) tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp3 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x4, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_7 del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, buf3, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf6) buf7 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0) del buf3 extern_kernels.addmm(primals_11, reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_11 return reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), buf1, reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf6, (16, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0) class ScaledDotProductAttention(nn.Module): def __init__(self): super(ScaledDotProductAttention, self).__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttentionNew(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super(MultiHeadAttentionNew, self).__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) def forward(self, input_0, input_1, input_2): primals_1 = self.Wq.weight 
primals_2 = self.Wq.bias primals_4 = self.Wk.weight primals_5 = self.Wk.bias primals_7 = self.Wv.weight primals_8 = self.Wv.bias primals_10 = self.Wo.weight primals_11 = self.Wo.bias primals_3 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
SeungoneKim/Transformer_implementation
MultiHeadAttention
false
1043
[ "Apache-2.0" ]
0
a52bf552eb645fc9bfb812cc26842fc147d6c008
https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def __init__(self): super().__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class Model(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super().__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [4, 4, 4, 4]
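# Editor's note: a minimal usage sketch (not part of the dataset row) for the
# multi-head attention module in the entry above; shapes follow this entry's
# get_inputs()/get_init_inputs(), so num_head=4 with model_dim=4 gives
# size_per_head=1. Input values are random and purely illustrative.
import torch

model = Model(model_dim=4, key_dim=4, value_dim=4, num_head=4)
q = k = v = torch.rand(4, 4, 4)   # (batch, seq_len, model_dim)
out = model(q, k, v)              # no mask: plain softmax(QK^T / sqrt(d)) @ V
print(out.shape)                  # torch.Size([4, 4, 4])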
Encoding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/yx/cyx5u6kg47bcb2a4mvrlkw5ynd42h4mj76hk2j6tveptehbkmic4.py # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, scaled_l2_norm], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] # Source node to ATen node mapping: # pow_1 => pow_1 # scaled_l2_norm => mul # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %view_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sum_1), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp4 + tmp8 tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp9 + tmp13 tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp0 * tmp19 tl.store(out_ptr0 + (x4), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # assignment_weights => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [2], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): 
xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # assignment_weights => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/36/c36bunrpokrzs5svt4e6kwmfyyitmjh7nivu5rc6sidx55znumrf.py # Topologically Sorted Source Nodes: 
[sub, mul_1, encoded_feat], Original ATen: [aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # encoded_feat => sum_3 # mul_1 => mul_1 # sub => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %view_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %sub), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_per_fused_mul_sub_sum_3 = async_compile.triton('triton_per_fused_mul_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x1 = (xindex // 4) % 4 x2 = (xindex // 16) x0 = xindex % 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*r3) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r3 + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tl.store(out_ptr0 + (x5), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, scaled_l2_norm], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] 
stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(primals_3, primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [assignment_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, mul_1, encoded_feat], Original ATen: [aten.sub, aten.mul, aten.sum] triton_per_fused_mul_sub_sum_3.run(buf2, primals_1, primals_2, buf3, 64, 16, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
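The pair of _softmax kernels above is the standard numerically stable two-pass softmax: the first kernel subtracts the row max and exponentiates, the second normalizes by the row sum. A minimal eager-mode sketch of the same decomposition (tensor names are illustrative, not taken from the generated code):

import torch

def two_pass_softmax(scores: torch.Tensor, dim: int = 2) -> torch.Tensor:
    # Pass 1 (mirrors triton_poi_fused__softmax_1): shift by the row max
    # for numerical stability, then exponentiate.
    exp = (scores - scores.amax(dim=dim, keepdim=True)).exp()
    # Pass 2 (mirrors triton_poi_fused__softmax_2): normalize by the row sum.
    return exp / exp.sum(dim=dim, keepdim=True)

scores = torch.randn(4, 16, 4)  # (batch, H*W, num_codes), the shape of buf0 above
assert torch.allclose(two_pass_softmax(scores), torch.softmax(scores, dim=2), atol=1e-6)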
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization


class Encoding(nn.Module):
    """Encoding Layer: a learnable residual encoder.

    Input is of shape (batch_size, channels, height, width).
    Output is of shape (batch_size, num_codes, channels).

    Args:
        channels: dimension of the features or feature channels
        num_codes: number of code words
    """

    def __init__(self, channels, num_codes):
        super(Encoding, self).__init__()
        self.channels, self.num_codes = channels, num_codes
        std = 1.0 / (num_codes * channels) ** 0.5
        self.codewords = nn.Parameter(torch.empty(num_codes, channels,
            dtype=torch.float).uniform_(-std, std), requires_grad=True)
        self.scale = nn.Parameter(torch.empty(num_codes,
            dtype=torch.float).uniform_(-1, 0), requires_grad=True)

    @staticmethod
    def scaled_l2(x, codewords, scale):
        num_codes, channels = codewords.size()
        batch_size = x.size(0)
        reshaped_scale = scale.view((1, 1, num_codes))
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
            num_codes, channels))
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        scaled_l2_norm = reshaped_scale * (expanded_x -
            reshaped_codewords).pow(2).sum(dim=3)
        return scaled_l2_norm

    @staticmethod
    def aggregate(assignment_weights, x, codewords):
        num_codes, channels = codewords.size()
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        batch_size = x.size(0)
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
            num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
            reshaped_codewords)).sum(dim=1)
        return encoded_feat

    def forward(self, x):
        assert x.dim() == 4 and x.size(1) == self.channels
        batch_size = x.size(0)
        x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
        assignment_weights = F.softmax(self.scaled_l2(x, self.codewords,
            self.scale), dim=2)
        encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
        return encoded_feat

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})'
        return repr_str


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'num_codes': 4}]
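As a sanity check on scaled_l2, the same quantity can also be written with torch.cdist. This is an illustrative sketch that assumes the Encoding class above is in scope; it is not part of the original module:

import torch

x = torch.rand(2, 16, 4)       # (batch, H*W, channels)
codewords = torch.rand(4, 4)   # (num_codes, channels)
scale = torch.rand(4)          # (num_codes,)

expected = Encoding.scaled_l2(x, codewords, scale)
# cdist gives the Euclidean distance; squaring it recovers the sum of squares.
via_cdist = scale.view(1, 1, -1) * torch.cdist(
    x, codewords.unsqueeze(0).expand(x.size(0), -1, -1)).pow(2)
assert torch.allclose(expected, via_cdist, atol=1e-4)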
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._C
import torch.serialization

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# scaled_l2: out[b, n, k] = scale[k] * sum_c (x[b, n, c] - codewords[k, c])**2,
# with the channel loop (C = 4) fully unrolled.
@triton.jit
def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 16
    x2 = xindex // 64
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 - tmp2
    tmp4 = tmp3 * tmp3
    tmp7 = tmp5 - tmp6
    tmp8 = tmp7 * tmp7
    tmp9 = tmp4 + tmp8
    tmp12 = tmp10 - tmp11
    tmp13 = tmp12 * tmp12
    tmp14 = tmp9 + tmp13
    tmp17 = tmp15 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = tmp0 * tmp19
    tl.store(out_ptr0 + x4, tmp20, xmask)


# Softmax pass 1: exp(x - rowmax) over the num_codes axis.
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


# Softmax pass 2: normalize each row of exponentials by its sum.
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


# aggregate: out[b, k, c] = sum_n w[b, n, k] * (x[b, n, c] - codewords[k, c]),
# reduced over the H*W axis (rnumel = 16).
@triton.jit
def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r3 = rindex
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x0 = xindex % 4
    x4 = xindex % 16
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x1 + 4 * r3 + 64 * x2), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp1 = tl.load(in_ptr1 + (r3 + 16 * x0 + 64 * x2), xmask,
        eviction_policy='evict_last', other=0.0)
    tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 - tmp2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
    tmp7 = tl.where(xmask, tmp5, 0)
    tmp8 = tl.sum(tmp7, 1)[:, None]
    tl.store(out_ptr0 + x5, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_pow_sub_sum_0[grid(256)](primals_3, primals_1,
            primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_2[grid(256)](buf1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_per_fused_mul_sub_sum_3[grid(64)](buf2, primals_1,
            primals_2, buf3, 64, 16, XBLOCK=32, num_warps=4, num_stages=1)
        del buf2
    return buf3, primals_1, primals_2, primals_3


class EncodingNew(nn.Module):
    """Encoding Layer: a learnable residual encoder.

    Input is of shape (batch_size, channels, height, width).
    Output is of shape (batch_size, num_codes, channels).

    Args:
        channels: dimension of the features or feature channels
        num_codes: number of code words
    """

    def __init__(self, channels, num_codes):
        super(EncodingNew, self).__init__()
        self.channels, self.num_codes = channels, num_codes
        std = 1.0 / (num_codes * channels) ** 0.5
        self.codewords = nn.Parameter(torch.empty(num_codes, channels,
            dtype=torch.float).uniform_(-std, std), requires_grad=True)
        self.scale = nn.Parameter(torch.empty(num_codes,
            dtype=torch.float).uniform_(-1, 0), requires_grad=True)

    @staticmethod
    def scaled_l2(x, codewords, scale):
        num_codes, channels = codewords.size()
        batch_size = x.size(0)
        reshaped_scale = scale.view((1, 1, num_codes))
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
            num_codes, channels))
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        scaled_l2_norm = reshaped_scale * (expanded_x -
            reshaped_codewords).pow(2).sum(dim=3)
        return scaled_l2_norm

    @staticmethod
    def aggregate(assignment_weights, x, codewords):
        num_codes, channels = codewords.size()
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        batch_size = x.size(0)
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
            num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
            reshaped_codewords)).sum(dim=1)
        return encoded_feat

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})'
        return repr_str

    def forward(self, input_0):
        primals_2 = self.codewords
        primals_3 = self.scale
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
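One way to check that the Triton path reproduces the eager module is to copy the parameters across and compare outputs. This is an illustrative harness, assuming both Encoding and EncodingNew are importable and a CUDA device is available (call() asserts CUDA-resident, contiguous inputs):

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Encoding(channels=4, num_codes=4).cuda()
    fused = EncodingNew(channels=4, num_codes=4).cuda()
    fused.load_state_dict(eager.state_dict())  # share codewords and scale
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)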
SeHwanJoo/mmsegmentation_body
Encoding
false
1044
[ "Apache-2.0" ]
0
31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
https://github.com/SeHwanJoo/mmsegmentation_body/tree/31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization


class Model(nn.Module):
    """Encoding Layer: a learnable residual encoder.

    Input is of shape (batch_size, channels, height, width).
    Output is of shape (batch_size, num_codes, channels).

    Args:
        channels: dimension of the features or feature channels
        num_codes: number of code words
    """

    def __init__(self, channels, num_codes):
        super().__init__()
        self.channels, self.num_codes = channels, num_codes
        std = 1.0 / (num_codes * channels) ** 0.5
        self.codewords = nn.Parameter(torch.empty(num_codes, channels,
            dtype=torch.float).uniform_(-std, std), requires_grad=True)
        self.scale = nn.Parameter(torch.empty(num_codes,
            dtype=torch.float).uniform_(-1, 0), requires_grad=True)

    @staticmethod
    def scaled_l2(x, codewords, scale):
        num_codes, channels = codewords.size()
        batch_size = x.size(0)
        reshaped_scale = scale.view((1, 1, num_codes))
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
            num_codes, channels))
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        scaled_l2_norm = reshaped_scale * (expanded_x -
            reshaped_codewords).pow(2).sum(dim=3)
        return scaled_l2_norm

    @staticmethod
    def aggregate(assignment_weights, x, codewords):
        num_codes, channels = codewords.size()
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        batch_size = x.size(0)
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1),
            num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x -
            reshaped_codewords)).sum(dim=1)
        return encoded_feat

    def forward(self, x):
        assert x.dim() == 4 and x.size(1) == self.channels
        batch_size = x.size(0)
        x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous()
        assignment_weights = F.softmax(self.scaled_l2(x, self.codewords,
            self.scale), dim=2)
        encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
        return encoded_feat

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})'
        return repr_str


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
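The get_inputs/get_init_inputs helpers define how each record's module is constructed and fed. A minimal driver, assuming the definitions above are in scope (this record uses the flat-argument convention for get_init_inputs):

model = Model(*get_init_inputs())   # Model(4, 4)
out = model(*get_inputs())          # input of shape (4, 4, 4, 4)
print(out.shape)                    # torch.Size([4, 4, 4]): (batch, num_codes, channels)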
CnnNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/t4/ct4qtnuxylso56lddzt74wqzxctat7bgtw43ghj6wwn25j2c7msg.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (32*x2) + (128*y1)), tmp0, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_6/inductor_cache/gk/cgk5p6nacclp2ot6bn6nbnbresf5zlczkwvdxtsnsyi5hbqd3xsi.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (256*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/uc/cuconme7l5q53n3sxefuxolugwrfgw262laltqajqmscann6mybi.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 16384], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 128 xnumel = 8836 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = (yindex // 32) tmp0 = tl.load(in_ptr0 + (x2 + (8836*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + (32*x2) + (282752*y1)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ld/cldjotsifmpevio42aswbawvkexbmusj2bxnqz7d4jaevrxn5vht.py # Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # x_1 => relu # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 282752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = (xindex // 32) % 47 x2 = (xindex // 1504) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (6016*x2)), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (6016*x2)), xmask) tmp7 = tl.load(in_ptr0 + (3008 + x0 + (64*x1) + (6016*x2)), xmask) tmp12 = tl.load(in_ptr0 + (3040 + x0 + (64*x1) + (6016*x2)), xmask) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x3), tmp15, xmask) tl.store(out_ptr1 + (x3), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dj/cdjdw64ewlwdxlw4dclphhticnt624zm6q6wavwuwwofbgbk4ply.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 541696 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/b6/cb6puipugnxx2nscy7ndskuler7ba6tm3lmyvm6pr62rqflvor2h.py # Topologically Sorted Source Nodes: [max_pool2d_1, x_3], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # x_3 => relu_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 135424 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = (xindex // 64) % 23 x2 = (xindex // 1472) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (5888*x2)), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (5888*x2)), xmask) tmp7 = tl.load(in_ptr0 + (2944 + x0 + (128*x1) + (5888*x2)), xmask) tmp12 = tl.load(in_ptr0 + (3008 + x0 + (128*x1) + (5888*x2)), xmask) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = 
tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x3), tmp15, xmask) tl.store(out_ptr1 + (x3), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vm/cvmtij7djyjxpxvgju7slqbaipji5f5nz3ocixx4tzb3zoqbrr3j.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_4 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 247808 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/a2/ca2xazqvkvyqidyiiiw4ixs4ax4ahwzigfwnnkafigdyruyulcac.py # Topologically Sorted Source Nodes: [max_pool2d_2, x_5], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5 # x_5 => relu_2 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_4,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_7 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 128], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_7(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 484 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 11 y1 = (yindex // 11) y5 = yindex y4 = (yindex // 121) y6 = yindex % 121 tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (5632*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (5632*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2816 + x2 + (256*y0) + (5632*y1)), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2944 + x2 + (256*y0) + (5632*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1, 1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + (121*x2) + (15488*y4)), tmp18, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/bk/cbkmobcq4xugig5i4hltumomgc5bjkhuql4r3rz23mkiqfo33z4t.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_7 => relu_3 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/y3/cy3sy4qwyqyz63qaftf2sjogpitcadukpnborr6vso5qvhgtdvue.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] # Source node to ATen node mapping: # Graph fragment: # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_threshold_backward_9 = async_compile.triton('triton_poi_fused_threshold_backward_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 128], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_threshold_backward_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 512 xnumel = 121 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (121*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + (128*x2) + (15488*y1)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 1, 96, 96), (9216, 9216, 96, 1)) assert_size_stride(primals_4, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (500, 15488), (15488, 1)) assert_size_stride(primals_9, (500, ), (1, )) assert_size_stride(primals_10, (500, 500), (500, 1)) assert_size_stride(primals_11, (500, ), (1, )) assert_size_stride(primals_12, (30, 500), (500, 1)) assert_size_stride(primals_13, (30, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32, 2, 2), (128, 1, 64, 32), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_4, buf0, 2048, 4, grid=grid(2048, 4), stream=stream0) del primals_4 buf1 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_6, buf1, 8192, 4, grid=grid(8192, 4), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 94, 94), (282752, 8836, 94, 1)) buf3 = empty_strided_cuda((4, 32, 94, 94), (282752, 1, 3008, 32), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf2, primals_2, buf3, 128, 8836, grid=grid(128, 8836), stream=stream0) del buf2 del primals_2 buf4 = empty_strided_cuda((4, 32, 47, 47), (70688, 1, 1504, 32), torch.int8) buf5 = empty_strided_cuda((4, 32, 47, 47), (70688, 1, 1504, 32), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_3.run(buf3, buf4, buf5, 282752, grid=grid(282752), stream=stream0) # Topologically 
Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 46, 46), (135424, 1, 2944, 64)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf7, primals_5, 541696, grid=grid(541696), stream=stream0) del primals_5 buf8 = empty_strided_cuda((4, 64, 23, 23), (33856, 1, 1472, 64), torch.int8) buf9 = empty_strided_cuda((4, 64, 23, 23), (33856, 1, 1472, 64), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d_1, x_3], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_5.run(buf7, buf8, buf9, 135424, grid=grid(135424), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 22, 22), (61952, 1, 2816, 128)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf11, primals_7, 247808, grid=grid(247808), stream=stream0) del primals_7 buf12 = empty_strided_cuda((4, 128, 11, 11), (15488, 1, 1408, 128), torch.int8) buf13 = empty_strided_cuda((4, 128, 11, 11), (15488, 121, 11, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d_2, x_5], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_7.run(buf11, buf12, buf13, 484, 128, grid=grid(484, 128), stream=stream0) buf14 = empty_strided_cuda((4, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf13, (4, 15488), (15488, 1), 0), reinterpret_tensor(primals_8, (15488, 500), (1, 15488), 0), out=buf14) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu] triton_poi_fused_relu_8.run(buf15, primals_9, 2000, grid=grid(2000), stream=stream0) del primals_9 buf16 = empty_strided_cuda((4, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (500, 500), (1, 500), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu] triton_poi_fused_relu_8.run(buf17, primals_11, 2000, grid=grid(2000), stream=stream0) del primals_11 buf18 = empty_strided_cuda((4, 30), (30, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, buf17, reinterpret_tensor(primals_12, (500, 30), (1, 500), 0), alpha=1, beta=1, out=buf18) del primals_13 buf19 = empty_strided_cuda((4, 128, 11, 11), (15488, 1, 1408, 128), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] triton_poi_fused_threshold_backward_9.run(buf13, buf19, 512, 121, grid=grid(512, 121), stream=stream0) return (buf18, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7, buf8, buf9, buf11, buf12, reinterpret_tensor(buf13, (4, 15488), (15488, 1), 0), buf15, buf17, primals_12, primals_10, primals_8, buf19, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 96, 96), (9216, 9216, 96, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((500, 15488), (15488, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((500, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((30, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class CnnNet(nn.Module):

    def __init__(self):
        super(CnnNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(64, 128, 2)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(15488, 500)
        self.fc2 = nn.Linear(500, 500)
        self.fc3 = nn.Linear(500, 30)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.pool1(x))
        x = self.conv2(x)
        x = F.relu(self.pool2(x))
        x = self.conv3(x)
        x = F.relu(self.pool3(x))
        x = x.view(-1, 15488)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 1, 96, 96])]


def get_init_inputs():
    return [[], {}]
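The fc1 width of 15488 is fixed by the shape arithmetic: every conv here is unpadded (output side = input side - kernel + 1) and every pool floor-halves. A minimal shape trace (a sketch that assumes only the CnnNet class above and a CPU tensor) makes the number explicit:

import torch

net = CnnNet()
h = torch.rand(4, 1, 96, 96)
h = net.conv1(h)                # (4, 32, 94, 94): 96 - 3 + 1
h = torch.relu(net.pool1(h))    # (4, 32, 47, 47): floor(94 / 2)
h = net.conv2(h)                # (4, 64, 46, 46): 47 - 2 + 1
h = torch.relu(net.pool2(h))    # (4, 64, 23, 23)
h = net.conv3(h)                # (4, 128, 22, 22): 23 - 2 + 1
h = torch.relu(net.pool3(h))    # (4, 128, 11, 11)
assert h.flatten(1).shape[1] == 128 * 11 * 11 == 15488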
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 32 * x2 + 128 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 8836 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 8836 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 32 * x2 + 282752 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 282752 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = xindex // 32 % 47 x2 = xindex // 1504 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 6016 * x2), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 6016 * x2), xmask) tmp7 = tl.load(in_ptr0 + (3008 + x0 + 64 * x1 + 6016 * x2), xmask) tmp12 = tl.load(in_ptr0 + (3040 + x0 + 64 * x1 + 6016 * x2), xmask) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl 
.constexpr): xnumel = 541696 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 135424 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 23 x2 = xindex // 1472 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 5888 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 5888 * x2), xmask) tmp7 = tl.load(in_ptr0 + (2944 + x0 + 128 * x1 + 5888 * x2), xmask) tmp12 = tl.load(in_ptr0 + (3008 + x0 + 128 * x1 + 5888 * x2), xmask) tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + x3, tmp15, xmask) tl.store(out_ptr1 + x3, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_7(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 484 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 11 y1 = yindex // 11 y5 = yindex y4 = yindex // 121 y6 = yindex % 121 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 5632 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 5632 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2816 + x2 + 256 * y0 + 5632 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2944 + x2 + 256 * y0 + 5632 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1, 1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 121 * x2 + 15488 * y4), tmp18, xmask & ymask) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel 
= 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_threshold_backward_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 512 xnumel = 121 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 121 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + 128 * x2 + 15488 * y1), tmp2, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 1, 96, 96), (9216, 9216, 96, 1)) assert_size_stride(primals_4, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (500, 15488), (15488, 1)) assert_size_stride(primals_9, (500,), (1,)) assert_size_stride(primals_10, (500, 500), (500, 1)) assert_size_stride(primals_11, (500,), (1,)) assert_size_stride(primals_12, (30, 500), (500, 1)) assert_size_stride(primals_13, (30,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32, 2, 2), (128, 1, 64, 32), torch. 
float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4)](primals_4, buf0, 2048, 4, XBLOCK= 4, YBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch .float32) triton_poi_fused_1[grid(8192, 4)](primals_6, buf1, 8192, 4, XBLOCK= 4, YBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 94, 94), (282752, 8836, 94, 1)) buf3 = empty_strided_cuda((4, 32, 94, 94), (282752, 1, 3008, 32), torch.float32) triton_poi_fused_convolution_2[grid(128, 8836)](buf2, primals_2, buf3, 128, 8836, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf2 del primals_2 buf4 = empty_strided_cuda((4, 32, 47, 47), (70688, 1, 1504, 32), torch.int8) buf5 = empty_strided_cuda((4, 32, 47, 47), (70688, 1, 1504, 32), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_3[grid(282752)](buf3, buf4, buf5, 282752, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf5, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 46, 46), (135424, 1, 2944, 64)) buf7 = buf6 del buf6 triton_poi_fused_convolution_4[grid(541696)](buf7, primals_5, 541696, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 64, 23, 23), (33856, 1, 1472, 64), torch.int8) buf9 = empty_strided_cuda((4, 64, 23, 23), (33856, 1, 1472, 64), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_5[grid(135424)](buf7, buf8, buf9, 135424, XBLOCK=1024, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 22, 22), (61952, 1, 2816, 128)) buf11 = buf10 del buf10 triton_poi_fused_convolution_6[grid(247808)](buf11, primals_7, 247808, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf12 = empty_strided_cuda((4, 128, 11, 11), (15488, 1, 1408, 128), torch.int8) buf13 = empty_strided_cuda((4, 128, 11, 11), (15488, 121, 11, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_7[grid(484, 128)](buf11, buf12, buf13, 484, 128, XBLOCK=128, YBLOCK=4, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (4, 15488), (15488, 1), 0), reinterpret_tensor(primals_8, (15488, 500), (1, 15488), 0), out=buf14) buf15 = buf14 del buf14 triton_poi_fused_relu_8[grid(2000)](buf15, primals_9, 2000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf16 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (500, 500), (1, 500), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_8[grid(2000)](buf17, primals_11, 2000, XBLOCK =256, num_warps=4, num_stages=1) del primals_11 buf18 = empty_strided_cuda((4, 30), (30, 1), torch.float32) extern_kernels.addmm(primals_13, buf17, reinterpret_tensor( primals_12, (500, 30), (1, 500), 0), alpha=1, beta=1, out=buf18) del primals_13 buf19 = empty_strided_cuda((4, 128, 11, 11), (15488, 1, 1408, 128), torch.bool) triton_poi_fused_threshold_backward_9[grid(512, 121)](buf13, buf19, 512, 121, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) return (buf18, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7, buf8, 
buf9, buf11, buf12, reinterpret_tensor(buf13, (4, 15488), ( 15488, 1), 0), buf15, buf17, primals_12, primals_10, primals_8, buf19) class CnnNetNew(nn.Module): def __init__(self): super(CnnNetNew, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3) self.pool1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(32, 64, 2) self.pool2 = nn.MaxPool2d(2, 2) self.conv3 = nn.Conv2d(64, 128, 2) self.pool3 = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(15488, 500) self.fc2 = nn.Linear(500, 500) self.fc3 = nn.Linear(500, 30) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_12 = self.fc3.weight primals_13 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
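A quick way to check the generated CnnNetNew wrapper is to load the eager module's parameters into it and compare outputs. This is a sketch, not part of the dump: it assumes a CUDA device (call() pins device 0 and launches the Triton kernels) and both classes defined above.

import torch

torch.manual_seed(0)
eager = CnnNet().cuda().eval()
fused = CnnNetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical layer names, so weights transfer directly

x = torch.rand(4, 1, 96, 96, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)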
repo_name: RoyHirsch/DeepLearningCourse
module_name: CnnNet
synthetic: false
uuid: 1045
licenses: ["MIT"]
stars: 0
sha: 9036c0fdbb08b610524d7be991f8e4b490a82c6c
repo_link: https://github.com/RoyHirsch/DeepLearningCourse/tree/9036c0fdbb08b610524d7be991f8e4b490a82c6c
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(64, 128, 2)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(15488, 500)
        self.fc2 = nn.Linear(500, 500)
        self.fc3 = nn.Linear(500, 30)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.pool1(x))
        x = self.conv2(x)
        x = F.relu(self.pool2(x))
        x = self.conv3(x)
        x = F.relu(self.pool3(x))
        x = x.view(-1, 15488)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 1, 96, 96])]


def get_init_inputs():
    return []
entry_point: BILM
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/xq/cxq2qmrgyky7dqwgqwftj6uhbj5vdl2rg7fomifs252vwbdicjhh.py # Topologically Sorted Source Nodes: [pos_sig, pos_sig_1, neg_sig, neg_sig_1, sum_sig, x], Original ATen: [aten.sigmoid, aten.max_pool2d_with_indices, aten.mul, aten.add] # Source node to ATen node mapping: # neg_sig => mul # neg_sig_1 => _low_memory_max_pool2d_with_offsets_1 # pos_sig => sigmoid # pos_sig_1 => _low_memory_max_pool2d_with_offsets # sum_sig => add # x => mul_1 # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%sigmoid, [3, 3], [1, 1], [1, 1], [1, 1], False), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, -1), kwargs = {}) # %_low_memory_max_pool2d_with_offsets_1 : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%mul, [3, 3], [1, 1], [1, 1], [1, 1], False), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %add), kwargs = {}) triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x3 = xindex tmp115 = tl.load(in_ptr0 + (x3), xmask) tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + x3), tmp10 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tl.full(tmp12.shape, float("-inf"), tmp12.dtype) tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = x0 tmp16 = tmp15 >= tmp1 tmp17 = tmp15 < tmp3 tmp18 = tmp16 & tmp17 tmp19 = tmp5 & tmp18 tmp20 = tl.load(in_ptr0 + ((-4) + x3), tmp19 & xmask, other=0.0) tmp21 = tl.sigmoid(tmp20) tmp22 = tl.full(tmp21.shape, float("-inf"), tmp21.dtype) tmp23 = tl.where(tmp19, tmp21, tmp22) tmp24 = triton_helpers.maximum(tmp23, tmp14) tmp25 = 1 + x0 tmp26 = tmp25 >= tmp1 tmp27 = tmp25 < tmp3 tmp28 = tmp26 & tmp27 tmp29 = tmp5 & tmp28 tmp30 = tl.load(in_ptr0 + ((-3) + x3), tmp29 & xmask, other=0.0) tmp31 = tl.sigmoid(tmp30) tmp32 = tl.full(tmp31.shape, float("-inf"), tmp31.dtype) tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp24) tmp35 = x1 tmp36 = tmp35 >= tmp1 tmp37 = tmp35 < tmp3 tmp38 = tmp36 & tmp37 tmp39 = tmp38 & tmp9 tmp40 = tl.load(in_ptr0 + ((-1) + x3), tmp39 & xmask, other=0.0) tmp41 = tl.sigmoid(tmp40) tmp42 = tl.full(tmp41.shape, float("-inf"), tmp41.dtype) tmp43 = tl.where(tmp39, tmp41, tmp42) tmp44 = triton_helpers.maximum(tmp43, tmp34) tmp45 = tmp38 & tmp18 tmp46 = tl.load(in_ptr0 + (x3), tmp45 & xmask, other=0.0) tmp47 = tl.sigmoid(tmp46) tmp48 = tl.full(tmp47.shape, float("-inf"), tmp47.dtype) tmp49 = tl.where(tmp45, tmp47, tmp48) tmp50 = triton_helpers.maximum(tmp49, tmp44) tmp51 = tmp38 & tmp28 tmp52 = tl.load(in_ptr0 + (1 + x3), tmp51 & xmask, other=0.0) tmp53 = tl.sigmoid(tmp52) tmp54 = tl.full(tmp53.shape, float("-inf"), tmp53.dtype) tmp55 = tl.where(tmp51, tmp53, tmp54) tmp56 = triton_helpers.maximum(tmp55, tmp50) tmp57 = 1 + x1 tmp58 = tmp57 >= tmp1 tmp59 = tmp57 < tmp3 tmp60 = tmp58 & tmp59 tmp61 = tmp60 & tmp9 tmp62 = tl.load(in_ptr0 + (3 + x3), tmp61 & xmask, other=0.0) tmp63 = tl.sigmoid(tmp62) tmp64 = tl.full(tmp63.shape, float("-inf"), tmp63.dtype) tmp65 = tl.where(tmp61, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp56) tmp67 = tmp60 & tmp18 tmp68 = tl.load(in_ptr0 + (4 + x3), tmp67 & xmask, other=0.0) tmp69 = tl.sigmoid(tmp68) tmp70 = tl.full(tmp69.shape, float("-inf"), tmp69.dtype) tmp71 = tl.where(tmp67, tmp69, tmp70) tmp72 = triton_helpers.maximum(tmp71, tmp66) tmp73 = tmp60 & tmp28 tmp74 = tl.load(in_ptr0 + (5 + 
x3), tmp73 & xmask, other=0.0) tmp75 = tl.sigmoid(tmp74) tmp76 = tl.full(tmp75.shape, float("-inf"), tmp75.dtype) tmp77 = tl.where(tmp73, tmp75, tmp76) tmp78 = triton_helpers.maximum(tmp77, tmp72) tmp79 = -1.0 tmp80 = tmp12 * tmp79 tmp81 = tl.full(tmp80.shape, float("-inf"), tmp80.dtype) tmp82 = tl.where(tmp10, tmp80, tmp81) tmp83 = tmp21 * tmp79 tmp84 = tl.full(tmp83.shape, float("-inf"), tmp83.dtype) tmp85 = tl.where(tmp19, tmp83, tmp84) tmp86 = triton_helpers.maximum(tmp85, tmp82) tmp87 = tmp31 * tmp79 tmp88 = tl.full(tmp87.shape, float("-inf"), tmp87.dtype) tmp89 = tl.where(tmp29, tmp87, tmp88) tmp90 = triton_helpers.maximum(tmp89, tmp86) tmp91 = tmp41 * tmp79 tmp92 = tl.full(tmp91.shape, float("-inf"), tmp91.dtype) tmp93 = tl.where(tmp39, tmp91, tmp92) tmp94 = triton_helpers.maximum(tmp93, tmp90) tmp95 = tmp47 * tmp79 tmp96 = tl.full(tmp95.shape, float("-inf"), tmp95.dtype) tmp97 = tl.where(tmp45, tmp95, tmp96) tmp98 = triton_helpers.maximum(tmp97, tmp94) tmp99 = tmp53 * tmp79 tmp100 = tl.full(tmp99.shape, float("-inf"), tmp99.dtype) tmp101 = tl.where(tmp51, tmp99, tmp100) tmp102 = triton_helpers.maximum(tmp101, tmp98) tmp103 = tmp63 * tmp79 tmp104 = tl.full(tmp103.shape, float("-inf"), tmp103.dtype) tmp105 = tl.where(tmp61, tmp103, tmp104) tmp106 = triton_helpers.maximum(tmp105, tmp102) tmp107 = tmp69 * tmp79 tmp108 = tl.full(tmp107.shape, float("-inf"), tmp107.dtype) tmp109 = tl.where(tmp67, tmp107, tmp108) tmp110 = triton_helpers.maximum(tmp109, tmp106) tmp111 = tmp75 * tmp79 tmp112 = tl.full(tmp111.shape, float("-inf"), tmp111.dtype) tmp113 = tl.where(tmp73, tmp111, tmp112) tmp114 = triton_helpers.maximum(tmp113, tmp110) tmp116 = tmp78 + tmp114 tmp117 = tmp115 * tmp116 tl.store(in_out_ptr0 + (x3), tmp117, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [pos_sig, pos_sig_1, neg_sig, neg_sig_1, sum_sig, x], Original ATen: [aten.sigmoid, aten.max_pool2d_with_indices, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0.run(buf2, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class BILM(nn.Module):

    def __init__(self):
        super(BILM, self).__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)

    def forward(self, feat):
        pos_sig = torch.sigmoid(feat)
        neg_sig = -1 * pos_sig
        pos_sig = self.maxpool1(pos_sig)
        neg_sig = self.maxpool2(neg_sig)
        sum_sig = pos_sig + neg_sig
        x = feat * sum_sig
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
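Because max-pooling a negated map is a negated min-pool, sum_sig collapses to the local 3x3 maximum minus the local 3x3 minimum of the sigmoid map, so BILM gates feat by local contrast (the fused kernel below fills out-of-bounds taps with -inf, matching PyTorch's implicit negative-infinity padding for max pooling). A short check of that reading, assuming only the BILM class above:

import torch
import torch.nn.functional as F

feat = torch.rand(4, 4, 4, 4)
sig = torch.sigmoid(feat)
local_max = F.max_pool2d(sig, 3, stride=1, padding=1)
local_min = -F.max_pool2d(-sig, 3, stride=1, padding=1)  # maxpool(-x) == -minpool(x)
torch.testing.assert_close(BILM()(feat), feat * (local_max - local_min))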
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x3 = xindex tmp115 = tl.load(in_ptr0 + x3, xmask) tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x3), tmp10 & xmask, other=0.0) tmp12 = tl.sigmoid(tmp11) tmp13 = tl.full(tmp12.shape, float('-inf'), tmp12.dtype) tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = x0 tmp16 = tmp15 >= tmp1 tmp17 = tmp15 < tmp3 tmp18 = tmp16 & tmp17 tmp19 = tmp5 & tmp18 tmp20 = tl.load(in_ptr0 + (-4 + x3), tmp19 & xmask, other=0.0) tmp21 = tl.sigmoid(tmp20) tmp22 = tl.full(tmp21.shape, float('-inf'), tmp21.dtype) tmp23 = tl.where(tmp19, tmp21, tmp22) tmp24 = triton_helpers.maximum(tmp23, tmp14) tmp25 = 1 + x0 tmp26 = tmp25 >= tmp1 tmp27 = tmp25 < tmp3 tmp28 = tmp26 & tmp27 tmp29 = tmp5 & tmp28 tmp30 = tl.load(in_ptr0 + (-3 + x3), tmp29 & xmask, other=0.0) tmp31 = tl.sigmoid(tmp30) tmp32 = tl.full(tmp31.shape, float('-inf'), tmp31.dtype) tmp33 = tl.where(tmp29, tmp31, tmp32) tmp34 = triton_helpers.maximum(tmp33, tmp24) tmp35 = x1 tmp36 = tmp35 >= tmp1 tmp37 = tmp35 < tmp3 tmp38 = tmp36 & tmp37 tmp39 = tmp38 & tmp9 tmp40 = tl.load(in_ptr0 + (-1 + x3), tmp39 & xmask, other=0.0) tmp41 = tl.sigmoid(tmp40) tmp42 = tl.full(tmp41.shape, float('-inf'), tmp41.dtype) tmp43 = tl.where(tmp39, tmp41, tmp42) tmp44 = triton_helpers.maximum(tmp43, tmp34) tmp45 = tmp38 & tmp18 tmp46 = tl.load(in_ptr0 + x3, tmp45 & xmask, other=0.0) tmp47 = tl.sigmoid(tmp46) tmp48 = tl.full(tmp47.shape, float('-inf'), tmp47.dtype) tmp49 = tl.where(tmp45, tmp47, tmp48) tmp50 = triton_helpers.maximum(tmp49, tmp44) tmp51 = tmp38 & tmp28 tmp52 = tl.load(in_ptr0 + (1 + x3), tmp51 & xmask, other=0.0) tmp53 = tl.sigmoid(tmp52) tmp54 = tl.full(tmp53.shape, float('-inf'), tmp53.dtype) tmp55 = tl.where(tmp51, tmp53, tmp54) tmp56 = triton_helpers.maximum(tmp55, tmp50) tmp57 = 1 + x1 tmp58 = tmp57 >= tmp1 tmp59 = tmp57 < tmp3 tmp60 = tmp58 & tmp59 tmp61 = tmp60 & tmp9 tmp62 = tl.load(in_ptr0 + (3 + x3), tmp61 & xmask, other=0.0) tmp63 = tl.sigmoid(tmp62) tmp64 = tl.full(tmp63.shape, float('-inf'), tmp63.dtype) tmp65 = tl.where(tmp61, tmp63, tmp64) tmp66 = triton_helpers.maximum(tmp65, tmp56) tmp67 = tmp60 & tmp18 tmp68 = tl.load(in_ptr0 + (4 + x3), tmp67 & xmask, other=0.0) tmp69 = tl.sigmoid(tmp68) tmp70 = tl.full(tmp69.shape, float('-inf'), tmp69.dtype) tmp71 = tl.where(tmp67, tmp69, tmp70) tmp72 = triton_helpers.maximum(tmp71, tmp66) tmp73 = tmp60 & tmp28 tmp74 = tl.load(in_ptr0 + (5 + x3), tmp73 & xmask, other=0.0) tmp75 = tl.sigmoid(tmp74) tmp76 = tl.full(tmp75.shape, float('-inf'), tmp75.dtype) tmp77 = tl.where(tmp73, tmp75, tmp76) tmp78 = triton_helpers.maximum(tmp77, tmp72) tmp79 = -1.0 tmp80 = tmp12 * tmp79 tmp81 = tl.full(tmp80.shape, float('-inf'), tmp80.dtype) tmp82 = tl.where(tmp10, 
tmp80, tmp81) tmp83 = tmp21 * tmp79 tmp84 = tl.full(tmp83.shape, float('-inf'), tmp83.dtype) tmp85 = tl.where(tmp19, tmp83, tmp84) tmp86 = triton_helpers.maximum(tmp85, tmp82) tmp87 = tmp31 * tmp79 tmp88 = tl.full(tmp87.shape, float('-inf'), tmp87.dtype) tmp89 = tl.where(tmp29, tmp87, tmp88) tmp90 = triton_helpers.maximum(tmp89, tmp86) tmp91 = tmp41 * tmp79 tmp92 = tl.full(tmp91.shape, float('-inf'), tmp91.dtype) tmp93 = tl.where(tmp39, tmp91, tmp92) tmp94 = triton_helpers.maximum(tmp93, tmp90) tmp95 = tmp47 * tmp79 tmp96 = tl.full(tmp95.shape, float('-inf'), tmp95.dtype) tmp97 = tl.where(tmp45, tmp95, tmp96) tmp98 = triton_helpers.maximum(tmp97, tmp94) tmp99 = tmp53 * tmp79 tmp100 = tl.full(tmp99.shape, float('-inf'), tmp99.dtype) tmp101 = tl.where(tmp51, tmp99, tmp100) tmp102 = triton_helpers.maximum(tmp101, tmp98) tmp103 = tmp63 * tmp79 tmp104 = tl.full(tmp103.shape, float('-inf'), tmp103.dtype) tmp105 = tl.where(tmp61, tmp103, tmp104) tmp106 = triton_helpers.maximum(tmp105, tmp102) tmp107 = tmp69 * tmp79 tmp108 = tl.full(tmp107.shape, float('-inf'), tmp107.dtype) tmp109 = tl.where(tmp67, tmp107, tmp108) tmp110 = triton_helpers.maximum(tmp109, tmp106) tmp111 = tmp75 * tmp79 tmp112 = tl.full(tmp111.shape, float('-inf'), tmp111.dtype) tmp113 = tl.where(tmp73, tmp111, tmp112) tmp114 = triton_helpers.maximum(tmp113, tmp110) tmp116 = tmp78 + tmp114 tmp117 = tmp115 * tmp116 tl.store(in_out_ptr0 + x3, tmp117, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_max_pool2d_with_indices_mul_sigmoid_0[grid(256)]( buf2, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf2, class BILMNew(nn.Module): def __init__(self): super(BILMNew, self).__init__() self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: SeunghwanByun/Real-Time-Road-Detection-Network
module_name: BILM
synthetic: false
uuid: 1046
licenses: ["MIT"]
stars: 0
sha: bc46615adef0e2b1a9a03dd4951559ca5849e6e1
repo_link: https://github.com/SeunghwanByun/Real-Time-Road-Detection-Network/tree/bc46615adef0e2b1a9a03dd4951559ca5849e6e1
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)

    def forward(self, feat):
        pos_sig = torch.sigmoid(feat)
        neg_sig = -1 * pos_sig
        pos_sig = self.maxpool1(pos_sig)
        neg_sig = self.maxpool2(neg_sig)
        sum_sig = pos_sig + neg_sig
        x = feat * sum_sig
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
entry_point: KLDLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/5v/c5vtwldilipwi3r3wx5rsxypbvfmsav7s7xybl5bf72d3sckrknw.py # Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul] # Source node to ATen node mapping: # add => add # exp => exp # mul => mul # pow_1 => pow_1 # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %exp), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {}) triton_per_fused_add_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 - tmp4 tmp6 = tl_math.exp(tmp0) tmp7 = tmp5 - tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = -0.5 tmp12 = tmp10 * tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_exp_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class KLDLoss(nn.Module):

    def forward(self, mu, logvar):
        return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
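The expression is the standard closed form of the KL divergence between a diagonal Gaussian posterior and the standard normal prior, summed over every element, with logvar standing in for log(sigma^2):

D_{\mathrm{KL}}\!\left(\mathcal{N}(\mu, \sigma^{2}) \,\|\, \mathcal{N}(0, 1)\right) = -\frac{1}{2} \sum \left(1 + \log\sigma^{2} - \mu^{2} - \sigma^{2}\right)

so logvar.exp() recovers sigma^2 and mu.pow(2) supplies the mu^2 term.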
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.exp(tmp0)
    tmp7 = tmp5 - tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = -0.5
    tmp12 = tmp10 * tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class KLDLossNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
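One detail worth flagging in the generated code: the kernel adds 1 to in_ptr0 and exponentiates it, so arg0_1 plays the logvar role and arg1_1 the mu role, the reverse of the eager forward(mu, logvar) signature. A sanity-check sketch (it assumes both classes above, a CUDA device, and the traced 4x4x4x4 shape that assert_size_stride enforces) therefore swaps the inputs:

import torch

mu = torch.rand(4, 4, 4, 4, device='cuda')
logvar = torch.rand(4, 4, 4, 4, device='cuda')
ref = KLDLoss()(mu, logvar)
out = KLDLossNew()(logvar, mu)  # arg0_1 = logvar, arg1_1 = mu in the kernel
torch.testing.assert_close(out, ref)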
repo_name: SebyakinAndrei/MichiGAN
module_name: KLDLoss
synthetic: false
uuid: 1047
licenses: ["MIT"]
stars: 0
sha: 6584c9a106b33096f38e8f5b11d0320f7065fd26
repo_link: https://github.com/SebyakinAndrei/MichiGAN/tree/6584c9a106b33096f38e8f5b11d0320f7065fd26
import torch
import torch.nn as nn
import torch.utils.data


class Model(nn.Module):

    def forward(self, mu, logvar):
        return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
entry_point: AddCoords
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nz/cnzr3enannjni75kec3qorz6jm6lyd5whz6u5l3ih55bgihwnb2u.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat] # Source node to ATen node mapping: # ret => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %device_put, %device_put_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 6 x3 = (xindex // 96) x4 = xindex % 16 x1 = (xindex // 4) % 4 x0 = xindex % 4 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = x1 tmp11 = tmp10.to(tl.float32) tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = 2.0 tmp15 = tmp13 * tmp14 tmp16 = 1.0 tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 6, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = x0 tmp24 = tmp23.to(tl.float32) tmp25 = tmp24 * tmp12 tmp26 = tmp25 * tmp14 tmp27 = tmp26 - tmp16 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tl.where(tmp9, tmp19, tmp29) tmp31 = tl.where(tmp4, tmp5, tmp30) tl.store(out_ptr0 + (x5), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class AddCoords(nn.Module):

    def __init__(self, with_r=False):
        super().__init__()
        self.with_r = with_r

    def forward(self, input_tensor):
        """
        Args:
            input_tensor: shape(batch, channel, x_dim, y_dim)
        """
        batch_size, _, x_dim, y_dim = input_tensor.size()
        xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
        yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
        xx_channel = xx_channel.float() / (x_dim - 1)
        yy_channel = yy_channel.float() / (y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1
        xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
        yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
        ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
            yy_channel.type_as(input_tensor)], dim=1)
        if self.with_r:
            rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
                0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
            ret = torch.cat([ret, rr], dim=1)
        return ret


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
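The two appended channels are just row and column indices rescaled to [-1, 1]; the Triton kernel above bakes this in as index * 0.3333... * 2 - 1 for the traced 4x4 shape. A tiny demonstration, assuming only the AddCoords class above:

import torch

out = AddCoords()(torch.zeros(1, 1, 4, 4))
print(out.shape)  # torch.Size([1, 3, 4, 4]): input channel + xx + yy
print(out[0, 1])  # row coordinate: rows are constant, -1, -1/3, 1/3, 1 from top to bottom
print(out[0, 2])  # column coordinate: every row reads -1, -1/3, 1/3, 1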
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16 % 6
    x3 = xindex // 96
    x4 = xindex % 16
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 5, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = x1
    tmp11 = tmp10.to(tl.float32)
    tmp12 = 0.3333333333333333
    tmp13 = tmp11 * tmp12
    tmp14 = 2.0
    tmp15 = tmp13 * tmp14
    tmp16 = 1.0
    tmp17 = tmp15 - tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp9, tmp17, tmp18)
    tmp20 = tmp0 >= tmp7
    tl.full([1], 6, tl.int64)
    tmp23 = x0
    tmp24 = tmp23.to(tl.float32)
    tmp25 = tmp24 * tmp12
    tmp26 = tmp25 * tmp14
    tmp27 = tmp26 - tmp16
    tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
    tmp29 = tl.where(tmp20, tmp27, tmp28)
    tmp30 = tl.where(tmp9, tmp19, tmp29)
    tmp31 = tl.where(tmp4, tmp5, tmp30)
    tl.store(out_ptr0 + x5, tmp31, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class AddCoordsNew(nn.Module):

    def __init__(self, with_r=False):
        super().__init__()
        self.with_r = with_r

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: SeunghwanByun/Real-Time-Road-Detection-Network
module_name: AddCoords
synthetic: false
uuid: 1048
licenses: ["MIT"]
stars: 0
sha: bc46615adef0e2b1a9a03dd4951559ca5849e6e1
repo_link: https://github.com/SeunghwanByun/Real-Time-Road-Detection-Network/tree/bc46615adef0e2b1a9a03dd4951559ca5849e6e1
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, with_r=False):
        super().__init__()
        self.with_r = with_r

    def forward(self, input_tensor):
        """
        Args:
            input_tensor: shape(batch, channel, x_dim, y_dim)
        """
        batch_size, _, x_dim, y_dim = input_tensor.size()
        xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
        yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
        xx_channel = xx_channel.float() / (x_dim - 1)
        yy_channel = yy_channel.float() / (y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1
        xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
        yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
        ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor),
            yy_channel.type_as(input_tensor)], dim=1)
        if self.with_r:
            rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) -
                0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
            ret = torch.cat([ret, rr], dim=1)
        return ret


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
entry_point: BCEDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ij/cijqe3u6tb2m4eqcjcyn2yvugi5jt3lklpnhpcyrzltvdudroq22.py # Topologically Sorted Source Nodes: [loss, pred], Original ATen: [aten._log_softmax, aten._softmax] # Source node to ATen node mapping: # loss => amax, sub # pred => amax_1, exp_1, sub_2 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) triton_poi_fused__log_softmax__softmax_0 = async_compile.triton('triton_poi_fused__log_softmax__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py # Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax] # Source node to ATen node mapping: # pred => div, sum_3 # Graph fragment: # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 
tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/de/cdeyqtmcoa2bhcp6bumzbremn4d7odew3le3fnk2ptjpoeuhteru.py # Topologically Sorted Source Nodes: [loss, loss_1, loss_bce, mul_14, mul_1, ne, valid_mask, valid_mask_1, mul_2, sum_1, mul_3, num, pow_1, pow_2, add_1, sum_2, den, truediv, loss_2, loss_3, total_loss, mul_4, valid_mask_2, mul_5, sum_3, mul_6, num_1, pow_3, pow_4, add_5, sum_4, den_1, truediv_1, loss_4, loss_5, total_loss_1, mul_7, valid_mask_3, mul_8, sum_5, mul_9, num_2, pow_5, pow_6, add_8, sum_6, den_2, truediv_2, loss_6, loss_7, total_loss_2, mul_10, valid_mask_4, mul_11, sum_7, mul_12, num_3, pow_7, pow_8, add_11, sum_8, den_3, truediv_3, loss_8, loss_9, total_loss_3, loss_10, loss_11, loss_dice, mul_15, add_13], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean, aten.ne, aten._to_copy, aten.view, aten.add, aten.pow, aten.div, aten.rsub] # Source node to ATen node mapping: # add_1 => add_1 # add_11 => add_13 # add_13 => add_16 # add_5 => add_5 # add_8 => add_9 # den => add_2 # den_1 => add_6 # den_2 => add_10 # den_3 => add_14 # loss => exp, log, mul, neg, sub_1, sum_1, sum_2 # loss_1 => mean # loss_10 => div_5 # loss_11 => mean_5 # loss_2 => sub_3 # loss_3 => mean_1 # loss_4 => sub_4 # loss_5 => mean_2 # loss_6 => sub_5 # loss_7 => mean_3 # loss_8 => sub_6 # loss_9 => mean_4 # loss_bce => mul_1 # loss_dice => mul_14 # mul_1 => mul_2 # mul_10 => mul_11 # mul_11 => mul_12 # mul_12 => mul_13 # mul_14 => mul_15 # mul_15 => mul_16 # mul_2 => mul_3 # mul_3 => mul_4 # mul_4 => mul_5 # mul_5 => mul_6 # mul_6 => mul_7 # mul_7 => mul_8 # mul_8 => mul_9 # mul_9 => mul_10 # ne => ne # num => add # num_1 => add_4 # num_2 => add_8 # num_3 => add_12 # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # pow_4 => pow_4 # pow_5 => pow_5 # pow_6 => pow_6 # pow_7 => pow_7 # pow_8 => pow_8 # sum_1 => sum_4 # sum_2 => sum_5 # sum_3 => sum_6 # sum_4 => sum_7 # sum_5 => sum_8 # sum_6 => sum_9 # sum_7 => sum_10 # sum_8 => sum_11 # total_loss => add_3 # total_loss_1 => add_7 # total_loss_2 => add_11 # total_loss_3 => add_15 # truediv => div_1 # truediv_1 => div_2 # truediv_2 => div_3 # truediv_3 => div_4 # valid_mask => convert_element_type_2 # valid_mask_1 => view_2 # valid_mask_2 => view_5 # valid_mask_3 => view_8 # valid_mask_4 => view_11 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %select_1), kwargs = {}) # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, 255), kwargs = {}) # %convert_element_type_2 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.int64), kwargs = {}) # %view_2 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_1, [1]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %select_3), kwargs = {}) # %view_5 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %view_5), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_5, [1]), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_7, 1), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, %add_6), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_2), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_4,), kwargs = {}) # %add_7 : [num_users=1] 
= call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mean_2), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %select_5), kwargs = {}) # %view_8 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_8, %view_8), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [1]), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, 2), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, 1), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_6, 2), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_5, 2), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_5, %pow_6), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_9, [1]), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, 1), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_8, %add_10), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_3), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mean_3), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %select_7), kwargs = {}) # %view_11 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%convert_element_type_2, [4, -1]), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_11, %view_11), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_12, [1]), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_10, 2), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_13, 1), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_9, 2), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_7, 2), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_7, %pow_8), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add_13, [1]), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_11, 1), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_12, %add_14), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_4), kwargs = {}) # %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_6,), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mean_4), kwargs = {}) # %div_5 : 
[num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 4), kwargs = {}) # %mean_5 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_5,), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_5, 1.0), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_14, 0.5), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, %mul_16), kwargs = {}) triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2 = async_compile.triton('triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 5, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp139 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') 
tmp180 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp32 = tmp13.to(tl.int64) tmp33 = tl.full([1, 1], 0, tl.int64) tmp34 = triton_helpers.maximum(tmp32, tmp33) tmp35 = tl.full([1, 1], 3, tl.int64) tmp36 = triton_helpers.minimum(tmp34, tmp35) tmp37 = tmp36 == tmp33 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38.to(tl.float32) tmp40 = tmp31 * tmp39 tmp41 = 255.0 tmp42 = tmp13 != tmp41 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43.to(tl.float32) tmp45 = tmp40 * tmp44 tmp46 = tmp16.to(tl.int64) tmp47 = triton_helpers.maximum(tmp46, tmp33) tmp48 = triton_helpers.minimum(tmp47, tmp35) tmp49 = tmp48 == tmp33 tmp50 = tmp49.to(tl.int64) tmp51 = tmp50.to(tl.float32) tmp52 = tmp31 * tmp51 tmp53 = tmp16 != tmp41 tmp54 = tmp53.to(tl.int64) tmp55 = tmp54.to(tl.float32) tmp56 = tmp52 * tmp55 tmp57 = tmp45 + tmp56 tmp58 = tmp20.to(tl.int64) tmp59 = triton_helpers.maximum(tmp58, tmp33) tmp60 = triton_helpers.minimum(tmp59, tmp35) tmp61 = tmp60 == tmp33 tmp62 = tmp61.to(tl.int64) tmp63 = tmp62.to(tl.float32) tmp64 = tmp31 * tmp63 tmp65 = tmp20 != tmp41 tmp66 = tmp65.to(tl.int64) tmp67 = tmp66.to(tl.float32) tmp68 = tmp64 * tmp67 tmp69 = tmp57 + tmp68 tmp70 = tmp24.to(tl.int64) tmp71 = triton_helpers.maximum(tmp70, tmp33) tmp72 = triton_helpers.minimum(tmp71, tmp35) tmp73 = tmp72 == tmp33 tmp74 = tmp73.to(tl.int64) tmp75 = tmp74.to(tl.float32) tmp76 = tmp31 * tmp75 tmp77 = tmp24 != tmp41 tmp78 = tmp77.to(tl.int64) tmp79 = tmp78.to(tl.float32) tmp80 = tmp76 * tmp79 tmp81 = tmp69 + tmp80 tmp82 = tmp31 * tmp31 tmp83 = tmp38 * tmp38 tmp84 = tmp83.to(tl.float32) tmp85 = tmp82 + tmp84 tmp86 = tmp50 * tmp50 tmp87 = tmp86.to(tl.float32) tmp88 = tmp82 + tmp87 tmp89 = tmp85 + tmp88 tmp90 = tmp62 * tmp62 tmp91 = tmp90.to(tl.float32) tmp92 = tmp82 + tmp91 tmp93 = tmp89 + tmp92 tmp94 = tmp74 * tmp74 tmp95 = tmp94.to(tl.float32) tmp96 = tmp82 + tmp95 tmp97 = tmp93 + tmp96 tmp99 = tl.full([1, 1], 1, tl.int64) tmp100 = tmp36 == tmp99 tmp101 = tmp100.to(tl.int64) tmp102 = tmp101.to(tl.float32) tmp103 = tmp98 * tmp102 tmp104 = tmp103 * tmp44 tmp105 = tmp48 == tmp99 tmp106 = tmp105.to(tl.int64) tmp107 = tmp106.to(tl.float32) tmp108 = tmp98 * tmp107 tmp109 = tmp108 * tmp55 tmp110 = tmp104 + tmp109 tmp111 = tmp60 == tmp99 tmp112 = tmp111.to(tl.int64) tmp113 = tmp112.to(tl.float32) tmp114 = tmp98 * tmp113 tmp115 = tmp114 * tmp67 tmp116 = tmp110 + tmp115 tmp117 = tmp72 == tmp99 tmp118 = tmp117.to(tl.int64) tmp119 = tmp118.to(tl.float32) tmp120 = tmp98 * tmp119 tmp121 = tmp120 * tmp79 tmp122 = tmp116 + tmp121 tmp123 = tmp98 * tmp98 tmp124 = tmp101 * tmp101 tmp125 = tmp124.to(tl.float32) tmp126 = tmp123 + tmp125 tmp127 = tmp106 * tmp106 tmp128 = tmp127.to(tl.float32) tmp129 = tmp123 + tmp128 tmp130 = tmp126 + tmp129 tmp131 = tmp112 * tmp112 tmp132 = tmp131.to(tl.float32) tmp133 = tmp123 + tmp132 tmp134 = tmp130 + tmp133 tmp135 = tmp118 * tmp118 tmp136 = tmp135.to(tl.float32) tmp137 = tmp123 + tmp136 tmp138 = tmp134 + tmp137 tmp140 = tl.full([1, 1], 2, tl.int64) tmp141 = tmp36 == tmp140 tmp142 = 
tmp141.to(tl.int64) tmp143 = tmp142.to(tl.float32) tmp144 = tmp139 * tmp143 tmp145 = tmp144 * tmp44 tmp146 = tmp48 == tmp140 tmp147 = tmp146.to(tl.int64) tmp148 = tmp147.to(tl.float32) tmp149 = tmp139 * tmp148 tmp150 = tmp149 * tmp55 tmp151 = tmp145 + tmp150 tmp152 = tmp60 == tmp140 tmp153 = tmp152.to(tl.int64) tmp154 = tmp153.to(tl.float32) tmp155 = tmp139 * tmp154 tmp156 = tmp155 * tmp67 tmp157 = tmp151 + tmp156 tmp158 = tmp72 == tmp140 tmp159 = tmp158.to(tl.int64) tmp160 = tmp159.to(tl.float32) tmp161 = tmp139 * tmp160 tmp162 = tmp161 * tmp79 tmp163 = tmp157 + tmp162 tmp164 = tmp139 * tmp139 tmp165 = tmp142 * tmp142 tmp166 = tmp165.to(tl.float32) tmp167 = tmp164 + tmp166 tmp168 = tmp147 * tmp147 tmp169 = tmp168.to(tl.float32) tmp170 = tmp164 + tmp169 tmp171 = tmp167 + tmp170 tmp172 = tmp153 * tmp153 tmp173 = tmp172.to(tl.float32) tmp174 = tmp164 + tmp173 tmp175 = tmp171 + tmp174 tmp176 = tmp159 * tmp159 tmp177 = tmp176.to(tl.float32) tmp178 = tmp164 + tmp177 tmp179 = tmp175 + tmp178 tmp181 = tmp36 == tmp35 tmp182 = tmp181.to(tl.int64) tmp183 = tmp182.to(tl.float32) tmp184 = tmp180 * tmp183 tmp185 = tmp184 * tmp44 tmp186 = tmp48 == tmp35 tmp187 = tmp186.to(tl.int64) tmp188 = tmp187.to(tl.float32) tmp189 = tmp180 * tmp188 tmp190 = tmp189 * tmp55 tmp191 = tmp185 + tmp190 tmp192 = tmp60 == tmp35 tmp193 = tmp192.to(tl.int64) tmp194 = tmp193.to(tl.float32) tmp195 = tmp180 * tmp194 tmp196 = tmp195 * tmp67 tmp197 = tmp191 + tmp196 tmp198 = tmp72 == tmp35 tmp199 = tmp198.to(tl.int64) tmp200 = tmp199.to(tl.float32) tmp201 = tmp180 * tmp200 tmp202 = tmp201 * tmp79 tmp203 = tmp197 + tmp202 tmp204 = tmp180 * tmp180 tmp205 = tmp182 * tmp182 tmp206 = tmp205.to(tl.float32) tmp207 = tmp204 + tmp206 tmp208 = tmp187 * tmp187 tmp209 = tmp208.to(tl.float32) tmp210 = tmp204 + tmp209 tmp211 = tmp207 + tmp210 tmp212 = tmp193 * tmp193 tmp213 = tmp212.to(tl.float32) tmp214 = tmp204 + tmp213 tmp215 = tmp211 + tmp214 tmp216 = tmp199 * tmp199 tmp217 = tmp216.to(tl.float32) tmp218 = tmp204 + tmp217 tmp219 = tmp215 + tmp218 tmp220 = 2.0 tmp221 = tmp81 * tmp220 tmp222 = 1.0 tmp223 = tmp221 + tmp222 tmp224 = tmp97 + tmp222 tmp225 = tmp223 / tmp224 tmp226 = tmp222 - tmp225 tmp227 = tl.broadcast_to(tmp226, [XBLOCK, RBLOCK]) tmp229 = tl.sum(tmp227, 1)[:, None] tmp230 = tmp122 * tmp220 tmp231 = tmp230 + tmp222 tmp232 = tmp138 + tmp222 tmp233 = tmp231 / tmp232 tmp234 = tmp222 - tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = tl.sum(tmp235, 1)[:, None] tmp238 = tmp163 * tmp220 tmp239 = tmp238 + tmp222 tmp240 = tmp179 + tmp222 tmp241 = tmp239 / tmp240 tmp242 = tmp222 - tmp241 tmp243 = tl.broadcast_to(tmp242, [XBLOCK, RBLOCK]) tmp245 = tl.sum(tmp243, 1)[:, None] tmp246 = tmp203 * tmp220 tmp247 = tmp246 + tmp222 tmp248 = tmp219 + tmp222 tmp249 = tmp247 / tmp248 tmp250 = tmp222 - tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = tl.sum(tmp251, 1)[:, None] tmp254 = 4.0 tmp255 = tmp30 / tmp254 tmp256 = tmp255 * tmp222 tmp257 = 0.5 tmp258 = tmp256 * tmp257 tmp259 = tmp229 / tmp254 tmp260 = 0.0 tmp261 = tmp259 + tmp260 tmp262 = tmp237 / tmp254 tmp263 = tmp261 + tmp262 tmp264 = tmp245 / tmp254 tmp265 = tmp263 + tmp264 tmp266 = tmp253 / tmp254 tmp267 = tmp265 + tmp266 tmp268 = 0.25 tmp269 = tmp267 * tmp268 tmp270 = tmp269 / tmp222 tmp271 = tmp270 * tmp222 tmp272 = tmp271 * tmp257 tmp273 = tmp258 + tmp272 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp273, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = 
args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss, pred], Original ATen: [aten._log_softmax, aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0.run(arg0_1, buf0, buf2, 16, grid=grid(16), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pred], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) del buf2 buf1 = empty_strided_cuda((), (), torch.float32) buf16 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [loss, loss_1, loss_bce, mul_14, mul_1, ne, valid_mask, valid_mask_1, mul_2, sum_1, mul_3, num, pow_1, pow_2, add_1, sum_2, den, truediv, loss_2, loss_3, total_loss, mul_4, valid_mask_2, mul_5, sum_3, mul_6, num_1, pow_3, pow_4, add_5, sum_4, den_1, truediv_1, loss_4, loss_5, total_loss_1, mul_7, valid_mask_3, mul_8, sum_5, mul_9, num_2, pow_5, pow_6, add_8, sum_6, den_2, truediv_2, loss_6, loss_7, total_loss_2, mul_10, valid_mask_4, mul_11, sum_7, mul_12, num_3, pow_7, pow_8, add_11, sum_8, den_3, truediv_3, loss_8, loss_9, total_loss_3, loss_10, loss_11, loss_dice, mul_15, add_13], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean, aten.ne, aten._to_copy, aten.view, aten.add, aten.pow, aten.div, aten.rsub] triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2.run(buf16, buf0, arg1_1, buf3, 1, 4, grid=grid(1), stream=stream0) del arg1_1 del buf0 del buf3 return (buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
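# A minimal eager-mode sketch (added for illustration, not part of the
# generated module) of what the first two kernels above compute: kernel 0
# emits the max-shifted logits (reused by the log_softmax branch) and their
# exponentials -- the standard numerically stable softmax trick of subtracting
# the row maximum -- and kernel 1 normalizes the exponentials by the row sum.
def reference_softmax_pair(logits):
    # logits: (N, C) float32, here (4, 4) as asserted in call()
    shifted = logits - logits.max(dim=1, keepdim=True).values  # kernel 0, out_ptr0
    exp = shifted.exp()                                        # kernel 0, out_ptr1
    return shifted, exp / exp.sum(dim=1, keepdim=True)         # kernel 1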
import functools import torch import numpy as np import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): """Expand onehot labels to match the size of prediction.""" bin_labels = labels.new_zeros(target_shape) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero(valid_mask, as_tuple=True) if inds[0].numel() > 0: if labels.dim() == 3: bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 else: bin_labels[inds[0], labels[valid_mask]] = 1 valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() if label_weights is None: bin_label_weights = valid_mask else: bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) bin_label_weights *= valid_mask return bin_labels, bin_label_weights def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=255): """Calculate the binary CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, 1). label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. ignore_index (int | None): The label index to be ignored. 
Default: 255 Returns: torch.Tensor: The calculated loss """ if pred.dim() != label.dim(): assert pred.dim() == 2 and label.dim() == 1 or pred.dim( ) == 4 and label.dim( ) == 3, 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported' label, weight = _expand_onehot_labels(label, weight, pred.shape, ignore_index) if weight is not None: weight = weight.float() loss = F.binary_cross_entropy_with_logits(pred, label.float(), pos_weight=class_weight, reduction='none') loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor =avg_factor) return loss def cross_entropy(pred, label, weight=None, class_weight=None, reduction= 'mean', avg_factor=None, ignore_index=-100): """The wrapper function for :func:`F.cross_entropy`""" loss = F.cross_entropy(pred, label, weight=class_weight, reduction= 'none', ignore_index=ignore_index) if weight is not None: weight = weight.float() loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): assert pred.shape[0] == target.shape[0] pred = pred.reshape(pred.shape[0], -1) target = target.reshape(target.shape[0], -1) valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth return 1 - num / den @weighted_loss def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight= None, ignore_index=255): assert pred.shape[0] == target.shape[0] total_loss = 0 num_classes = pred.shape[1] for i in range(num_classes): if i != ignore_index: dice_loss = binary_dice_loss(pred[:, i], target[..., i], valid_mask=valid_mask, smooth=smooth, exponent=exponent) if class_weight is not None: dice_loss *= class_weight[i] total_loss += dice_loss return total_loss / num_classes def get_class_weight(class_weight): """Get class weight for loss function. Args: class_weight (list[float] | str | None): If class_weight is a str, take it as a file name and read from it. 
""" if isinstance(class_weight, str): if class_weight.endswith('.npy'): class_weight = np.load(class_weight) else: class_weight = mmcv.load(class_weight) return class_weight def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor= None, class_weight=None, ignore_index=None): """Calculate the CrossEntropy loss for masks. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. label (torch.Tensor): ``label`` indicates the class label of the mask' corresponding object. This will be used to select the mask in the of the class which the object belongs to when the mask prediction if not class-agnostic. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. ignore_index (None): Placeholder, to be consistent with other loss. Default: None. Returns: torch.Tensor: The calculated loss """ assert ignore_index is None, 'BCE loss does not support ignore_index' assert reduction == 'mean' and avg_factor is None num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return F.binary_cross_entropy_with_logits(pred_slice, target, weight= class_weight, reduction='mean')[None] class BCEDiceLoss(nn.Module): """CrossEntropyLoss. Args: use_sigmoid (bool, optional): Whether the prediction uses sigmoid of softmax. Defaults to False. use_mask (bool, optional): Whether to use mask cross entropy loss. Defaults to False. reduction (str, optional): . Defaults to 'mean'. Options are "none", "mean" and "sum". class_weight (list[float] | str, optional): Weight of each class. If in str format, read them from a file. Defaults to None. loss_weight (float, optional): Weight of the loss. Defaults to 1.0. """ def __init__(self, use_sigmoid=False, use_mask=False, smooth=1, exponent=2, reduction='mean', class_weight=None, loss_weight=1.0, ignore_index=255, bce_weight=0.5, **kwards): super(BCEDiceLoss, self).__init__() assert use_sigmoid is False or use_mask is False self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.smooth = smooth self.exponent = exponent self.reduction = reduction self.class_weight = get_class_weight(class_weight) self.loss_weight = loss_weight self.ignore_index = ignore_index self.bce_weight = bce_weight if self.use_sigmoid: self.cls_criterion = binary_cross_entropy elif self.use_mask: self.cls_criterion = mask_cross_entropy else: self.cls_criterion = cross_entropy def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwards): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. 
reduction) if self.class_weight is not None: class_weight = pred.new_tensor(self.class_weight) else: class_weight = None loss_bce = self.loss_weight * self.cls_criterion(pred, target, weight, class_weight=class_weight, reduction=reduction, avg_factor=avg_factor, **kwards) pred = F.softmax(pred, dim=1) num_classes = pred.shape[1] one_hot_target = F.one_hot(torch.clamp(target.long(), 0, num_classes - 1), num_classes=num_classes) valid_mask = (target != self.ignore_index).long() loss_dice = self.loss_weight * dice_loss(pred, one_hot_target, valid_mask=valid_mask, reduction=reduction, avg_factor= avg_factor, smooth=self.smooth, exponent=self.exponent, class_weight=class_weight, ignore_index=self.ignore_index) return loss_bce * self.bce_weight + loss_dice * (1 - self.bce_weight) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
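# A minimal usage sketch (added for illustration, not part of the original
# file): BCEDiceLoss on the (4, 4) inputs from get_inputs(). Per class i the
# dice term is 1 - (2 * sum(p_i * t_i * mask) + smooth) /
# (sum(p_i**2 + t_i**2) + smooth), and forward returns
# bce_weight * loss_bce + (1 - bce_weight) * loss_dice. Float targets are
# treated as class probabilities by F.cross_entropy (PyTorch >= 1.10).
if __name__ == '__main__':
    criterion = BCEDiceLoss(bce_weight=0.5)
    pred, target = get_inputs()
    loss = criterion(pred, target)
    print(loss.item())  # scalar combined loss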
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import numpy as np import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp139 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last' ) tmp180 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last' ) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp32 = tmp13.to(tl.int64) tmp33 = tl.full([1, 1], 0, tl.int64) tmp34 = triton_helpers.maximum(tmp32, tmp33) tmp35 = tl.full([1, 1], 3, tl.int64) tmp36 = triton_helpers.minimum(tmp34, tmp35) tmp37 = tmp36 == tmp33 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38.to(tl.float32) tmp40 = tmp31 * tmp39 tmp41 = 255.0 tmp42 = tmp13 != tmp41 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43.to(tl.float32) tmp45 = tmp40 * tmp44 tmp46 = tmp16.to(tl.int64) tmp47 = triton_helpers.maximum(tmp46, tmp33) tmp48 = triton_helpers.minimum(tmp47, tmp35) tmp49 = tmp48 == tmp33 tmp50 = tmp49.to(tl.int64) tmp51 = tmp50.to(tl.float32) tmp52 = tmp31 * tmp51 tmp53 = tmp16 != tmp41 tmp54 = tmp53.to(tl.int64) tmp55 = tmp54.to(tl.float32) tmp56 = tmp52 * tmp55 tmp57 = tmp45 + tmp56 tmp58 = tmp20.to(tl.int64) tmp59 = triton_helpers.maximum(tmp58, tmp33) tmp60 = triton_helpers.minimum(tmp59, tmp35) tmp61 = tmp60 == tmp33 tmp62 = tmp61.to(tl.int64) tmp63 = tmp62.to(tl.float32) tmp64 = tmp31 * tmp63 tmp65 = tmp20 != tmp41 tmp66 = tmp65.to(tl.int64) tmp67 = tmp66.to(tl.float32) tmp68 = tmp64 * tmp67 tmp69 = tmp57 + tmp68 tmp70 = tmp24.to(tl.int64) tmp71 = triton_helpers.maximum(tmp70, tmp33) tmp72 = triton_helpers.minimum(tmp71, tmp35) tmp73 = tmp72 == tmp33 tmp74 = tmp73.to(tl.int64) tmp75 = tmp74.to(tl.float32) tmp76 = tmp31 * tmp75 tmp77 = tmp24 != tmp41 tmp78 = tmp77.to(tl.int64) tmp79 = tmp78.to(tl.float32) tmp80 = tmp76 * tmp79 tmp81 = tmp69 + tmp80 tmp82 = tmp31 * tmp31 tmp83 = tmp38 * tmp38 tmp84 = tmp83.to(tl.float32) tmp85 = tmp82 + tmp84 tmp86 = tmp50 * tmp50 tmp87 = tmp86.to(tl.float32) tmp88 = tmp82 + tmp87 tmp89 = tmp85 + tmp88 tmp90 = tmp62 * tmp62 tmp91 = tmp90.to(tl.float32) tmp92 = tmp82 + tmp91 tmp93 = tmp89 + tmp92 tmp94 = tmp74 * tmp74 tmp95 = tmp94.to(tl.float32) tmp96 = tmp82 + tmp95 tmp97 = tmp93 + tmp96 tmp99 = tl.full([1, 1], 1, tl.int64) tmp100 = tmp36 == tmp99 tmp101 = 
tmp100.to(tl.int64) tmp102 = tmp101.to(tl.float32) tmp103 = tmp98 * tmp102 tmp104 = tmp103 * tmp44 tmp105 = tmp48 == tmp99 tmp106 = tmp105.to(tl.int64) tmp107 = tmp106.to(tl.float32) tmp108 = tmp98 * tmp107 tmp109 = tmp108 * tmp55 tmp110 = tmp104 + tmp109 tmp111 = tmp60 == tmp99 tmp112 = tmp111.to(tl.int64) tmp113 = tmp112.to(tl.float32) tmp114 = tmp98 * tmp113 tmp115 = tmp114 * tmp67 tmp116 = tmp110 + tmp115 tmp117 = tmp72 == tmp99 tmp118 = tmp117.to(tl.int64) tmp119 = tmp118.to(tl.float32) tmp120 = tmp98 * tmp119 tmp121 = tmp120 * tmp79 tmp122 = tmp116 + tmp121 tmp123 = tmp98 * tmp98 tmp124 = tmp101 * tmp101 tmp125 = tmp124.to(tl.float32) tmp126 = tmp123 + tmp125 tmp127 = tmp106 * tmp106 tmp128 = tmp127.to(tl.float32) tmp129 = tmp123 + tmp128 tmp130 = tmp126 + tmp129 tmp131 = tmp112 * tmp112 tmp132 = tmp131.to(tl.float32) tmp133 = tmp123 + tmp132 tmp134 = tmp130 + tmp133 tmp135 = tmp118 * tmp118 tmp136 = tmp135.to(tl.float32) tmp137 = tmp123 + tmp136 tmp138 = tmp134 + tmp137 tmp140 = tl.full([1, 1], 2, tl.int64) tmp141 = tmp36 == tmp140 tmp142 = tmp141.to(tl.int64) tmp143 = tmp142.to(tl.float32) tmp144 = tmp139 * tmp143 tmp145 = tmp144 * tmp44 tmp146 = tmp48 == tmp140 tmp147 = tmp146.to(tl.int64) tmp148 = tmp147.to(tl.float32) tmp149 = tmp139 * tmp148 tmp150 = tmp149 * tmp55 tmp151 = tmp145 + tmp150 tmp152 = tmp60 == tmp140 tmp153 = tmp152.to(tl.int64) tmp154 = tmp153.to(tl.float32) tmp155 = tmp139 * tmp154 tmp156 = tmp155 * tmp67 tmp157 = tmp151 + tmp156 tmp158 = tmp72 == tmp140 tmp159 = tmp158.to(tl.int64) tmp160 = tmp159.to(tl.float32) tmp161 = tmp139 * tmp160 tmp162 = tmp161 * tmp79 tmp163 = tmp157 + tmp162 tmp164 = tmp139 * tmp139 tmp165 = tmp142 * tmp142 tmp166 = tmp165.to(tl.float32) tmp167 = tmp164 + tmp166 tmp168 = tmp147 * tmp147 tmp169 = tmp168.to(tl.float32) tmp170 = tmp164 + tmp169 tmp171 = tmp167 + tmp170 tmp172 = tmp153 * tmp153 tmp173 = tmp172.to(tl.float32) tmp174 = tmp164 + tmp173 tmp175 = tmp171 + tmp174 tmp176 = tmp159 * tmp159 tmp177 = tmp176.to(tl.float32) tmp178 = tmp164 + tmp177 tmp179 = tmp175 + tmp178 tmp181 = tmp36 == tmp35 tmp182 = tmp181.to(tl.int64) tmp183 = tmp182.to(tl.float32) tmp184 = tmp180 * tmp183 tmp185 = tmp184 * tmp44 tmp186 = tmp48 == tmp35 tmp187 = tmp186.to(tl.int64) tmp188 = tmp187.to(tl.float32) tmp189 = tmp180 * tmp188 tmp190 = tmp189 * tmp55 tmp191 = tmp185 + tmp190 tmp192 = tmp60 == tmp35 tmp193 = tmp192.to(tl.int64) tmp194 = tmp193.to(tl.float32) tmp195 = tmp180 * tmp194 tmp196 = tmp195 * tmp67 tmp197 = tmp191 + tmp196 tmp198 = tmp72 == tmp35 tmp199 = tmp198.to(tl.int64) tmp200 = tmp199.to(tl.float32) tmp201 = tmp180 * tmp200 tmp202 = tmp201 * tmp79 tmp203 = tmp197 + tmp202 tmp204 = tmp180 * tmp180 tmp205 = tmp182 * tmp182 tmp206 = tmp205.to(tl.float32) tmp207 = tmp204 + tmp206 tmp208 = tmp187 * tmp187 tmp209 = tmp208.to(tl.float32) tmp210 = tmp204 + tmp209 tmp211 = tmp207 + tmp210 tmp212 = tmp193 * tmp193 tmp213 = tmp212.to(tl.float32) tmp214 = tmp204 + tmp213 tmp215 = tmp211 + tmp214 tmp216 = tmp199 * tmp199 tmp217 = tmp216.to(tl.float32) tmp218 = tmp204 + tmp217 tmp219 = tmp215 + tmp218 tmp220 = 2.0 tmp221 = tmp81 * tmp220 tmp222 = 1.0 tmp223 = tmp221 + tmp222 tmp224 = tmp97 + tmp222 tmp225 = tmp223 / tmp224 tmp226 = tmp222 - tmp225 tmp227 = tl.broadcast_to(tmp226, [XBLOCK, RBLOCK]) tmp229 = tl.sum(tmp227, 1)[:, None] tmp230 = tmp122 * tmp220 tmp231 = tmp230 + tmp222 tmp232 = tmp138 + tmp222 tmp233 = tmp231 / tmp232 tmp234 = tmp222 - tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = tl.sum(tmp235, 1)[:, None] tmp238 = 
tmp163 * tmp220 tmp239 = tmp238 + tmp222 tmp240 = tmp179 + tmp222 tmp241 = tmp239 / tmp240 tmp242 = tmp222 - tmp241 tmp243 = tl.broadcast_to(tmp242, [XBLOCK, RBLOCK]) tmp245 = tl.sum(tmp243, 1)[:, None] tmp246 = tmp203 * tmp220 tmp247 = tmp246 + tmp222 tmp248 = tmp219 + tmp222 tmp249 = tmp247 / tmp248 tmp250 = tmp222 - tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = tl.sum(tmp251, 1)[:, None] tmp254 = 4.0 tmp255 = tmp30 / tmp254 tmp256 = tmp255 * tmp222 tmp257 = 0.5 tmp258 = tmp256 * tmp257 tmp259 = tmp229 / tmp254 tmp260 = 0.0 tmp261 = tmp259 + tmp260 tmp262 = tmp237 / tmp254 tmp263 = tmp261 + tmp262 tmp264 = tmp245 / tmp254 tmp265 = tmp263 + tmp264 tmp266 = tmp253 / tmp254 tmp267 = tmp265 + tmp266 tmp268 = 0.25 tmp269 = tmp267 * tmp268 tmp270 = tmp269 / tmp222 tmp271 = tmp270 * tmp222 tmp272 = tmp271 * tmp257 tmp273 = tmp258 + tmp272 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp273, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax__softmax_0[grid(16)](arg0_1, buf0, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 buf1 = empty_strided_cuda((), (), torch.float32) buf16 = buf1 del buf1 triton_per_fused__log_softmax__to_copy_add_div_mean_mul_ne_neg_pow_rsub_sum_view_2[ grid(1)](buf16, buf0, arg1_1, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 del buf3 return buf16, def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): """Expand onehot labels to match the size of prediction.""" bin_labels = labels.new_zeros(target_shape) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero(valid_mask, as_tuple=True) if inds[0].numel() > 0: if labels.dim() == 3: bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 else: bin_labels[inds[0], labels[valid_mask]] = 1 valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() if label_weights is None: bin_label_weights = valid_mask else: bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) bin_label_weights *= valid_mask return bin_labels, bin_label_weights def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. 
""" if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=255): """Calculate the binary CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, 1). label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. ignore_index (int | None): The label index to be ignored. Default: 255 Returns: torch.Tensor: The calculated loss """ if pred.dim() != label.dim(): assert pred.dim() == 2 and label.dim() == 1 or pred.dim( ) == 4 and label.dim( ) == 3, 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported' label, weight = _expand_onehot_labels(label, weight, pred.shape, ignore_index) if weight is not None: weight = weight.float() loss = F.binary_cross_entropy_with_logits(pred, label.float(), pos_weight=class_weight, reduction='none') loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor =avg_factor) return loss def cross_entropy(pred, label, weight=None, class_weight=None, reduction= 'mean', avg_factor=None, ignore_index=-100): """The wrapper function for :func:`F.cross_entropy`""" loss = F.cross_entropy(pred, label, weight=class_weight, reduction= 'none', ignore_index=ignore_index) if weight is not None: weight = weight.float() loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) 
>>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def binary_dice_loss(pred, target, valid_mask, smooth=1, exponent=2, **kwards): assert pred.shape[0] == target.shape[0] pred = pred.reshape(pred.shape[0], -1) target = target.reshape(target.shape[0], -1) valid_mask = valid_mask.reshape(valid_mask.shape[0], -1) num = torch.sum(torch.mul(pred, target) * valid_mask, dim=1) * 2 + smooth den = torch.sum(pred.pow(exponent) + target.pow(exponent), dim=1) + smooth return 1 - num / den @weighted_loss def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight= None, ignore_index=255): assert pred.shape[0] == target.shape[0] total_loss = 0 num_classes = pred.shape[1] for i in range(num_classes): if i != ignore_index: dice_loss = binary_dice_loss(pred[:, i], target[..., i], valid_mask=valid_mask, smooth=smooth, exponent=exponent) if class_weight is not None: dice_loss *= class_weight[i] total_loss += dice_loss return total_loss / num_classes def get_class_weight(class_weight): """Get class weight for loss function. Args: class_weight (list[float] | str | None): If class_weight is a str, take it as a file name and read from it. """ if isinstance(class_weight, str): if class_weight.endswith('.npy'): class_weight = np.load(class_weight) else: class_weight = mmcv.load(class_weight) return class_weight def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor= None, class_weight=None, ignore_index=None): """Calculate the CrossEntropy loss for masks. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. label (torch.Tensor): ``label`` indicates the class label of the mask' corresponding object. This will be used to select the mask in the of the class which the object belongs to when the mask prediction if not class-agnostic. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. ignore_index (None): Placeholder, to be consistent with other loss. Default: None. Returns: torch.Tensor: The calculated loss """ assert ignore_index is None, 'BCE loss does not support ignore_index' assert reduction == 'mean' and avg_factor is None num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return F.binary_cross_entropy_with_logits(pred_slice, target, weight= class_weight, reduction='mean')[None] class BCEDiceLossNew(nn.Module): """CrossEntropyLoss. Args: use_sigmoid (bool, optional): Whether the prediction uses sigmoid of softmax. Defaults to False. use_mask (bool, optional): Whether to use mask cross entropy loss. Defaults to False. reduction (str, optional): . Defaults to 'mean'. Options are "none", "mean" and "sum". class_weight (list[float] | str, optional): Weight of each class. If in str format, read them from a file. Defaults to None. loss_weight (float, optional): Weight of the loss. Defaults to 1.0. 
""" def __init__(self, use_sigmoid=False, use_mask=False, smooth=1, exponent=2, reduction='mean', class_weight=None, loss_weight=1.0, ignore_index=255, bce_weight=0.5, **kwards): super(BCEDiceLossNew, self).__init__() assert use_sigmoid is False or use_mask is False self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.smooth = smooth self.exponent = exponent self.reduction = reduction self.class_weight = get_class_weight(class_weight) self.loss_weight = loss_weight self.ignore_index = ignore_index self.bce_weight = bce_weight if self.use_sigmoid: self.cls_criterion = binary_cross_entropy elif self.use_mask: self.cls_criterion = mask_cross_entropy else: self.cls_criterion = cross_entropy def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
SeHwanJoo/mmsegmentation_body
BCEDiceLoss
false
1049
[ "Apache-2.0" ]
0
31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
https://github.com/SeHwanJoo/mmsegmentation_body/tree/31c4bf27c3dc0a84bfbb06a0c017c5908c17f0ac
import functools import torch import numpy as np import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): """Expand onehot labels to match the size of prediction.""" bin_labels = labels.new_zeros(target_shape) valid_mask = (labels >= 0) & (labels != ignore_index) inds = torch.nonzero(valid_mask, as_tuple=True) if inds[0].numel() > 0: if labels.dim() == 3: bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 else: bin_labels[inds[0], labels[valid_mask]] = 1 valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() if label_weights is None: bin_label_weights = valid_mask else: bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) bin_label_weights *= valid_mask return bin_labels, bin_label_weights def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: assert weight.dim() == loss.dim() if weight.dim() > 1: assert weight.size(1) == 1 or weight.size(1) == loss.size(1) loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=255): """Calculate the binary CrossEntropy loss. Args: pred (torch.Tensor): The prediction with shape (N, 1). label (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (list[float], optional): The weight for each class. ignore_index (int | None): The label index to be ignored. Default: 255 Returns: torch.Tensor: The calculated loss """ if pred.dim() != label.dim(): assert pred.dim() == 2 and label.dim() == 1 or pred.dim( ) == 4 and label.dim( ) == 3, 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported' label, weight = _expand_onehot_labels(label, weight, pred.shape, ignore_index) if weight is not None: weight = weight.float() loss = F.binary_cross_entropy_with_logits(pred, label.float(), pos_weight=class_weight, reduction='none') loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor =avg_factor) return loss def cross_entropy(pred, label, weight=None, class_weight=None, reduction= 'mean', avg_factor=None, ignore_index=-100): """The wrapper function for :func:`F.cross_entropy`""" loss = F.cross_ # ... truncated (>4000 chars) for memory efficiency
BasicBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/fv/cfvfxpmpur3qlmurffwz4u56tgvw75i4lbjvzd25ortunbobyxnh.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu] # Source node to ATen node mapping: # out_1 => add, rsqrt, var_mean # out_2 => relu # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + (16*x0)), tmp25, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lk/clk53igo2wowtd6pq5zu23svybsi67ef5dmegrb3qnpketcev22z.py # Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_4 => add_1, rsqrt_1, var_mean_1 # out_6 => relu_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_8,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_16, 0), kwargs = {}) triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(out_ptr2 + (r1 + (16*x0)), tmp27, xmask) tl.store(out_ptr3 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr4 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten._native_batch_norm_legit, aten.relu] stream0 = get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0.run(buf0, buf1, buf5, buf4, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf11 = 
empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_4, out_6], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward] triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1.run(buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16, grid=grid(16), stream=stream0) return (buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf4, (16, ), (1, ), 0), buf5, buf6, reinterpret_tensor(buf10, (16, ), (1, ), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) class BasicBlock(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_model='instance'): super(BasicBlock, self).__init__() if 'instance' == norm_model: norm_layer = nn.InstanceNorm2d else: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.stride = stride if stride != 1 or inplanes != planes: self.downsample = nn.Sequential(conv1x1(inplanes, planes, stride), norm_layer(planes)) else: self.downsample = downsample def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'planes': 4}]
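A minimal usage sketch for the block above, wiring get_init_inputs() and get_inputs() together the way a test harness would (names and shapes taken from the functions defined above):

import torch

init_args, init_kwargs = get_init_inputs()   # [], {'inplanes': 4, 'planes': 4}
block = BasicBlock(*init_args, **init_kwargs)
x, = get_inputs()                            # a (4, 4, 4, 4) tensor
out = block(x)
# stride 1 and inplanes == planes keep the identity shortcut, so shape is preserved
print(out.shape)                             # torch.Size([4, 4, 4, 4])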
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__native_batch_norm_legit_relu_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp24 = tl.full([1, 1], 0, tl.int32) tmp25 = triton_helpers.maximum(tmp24, tmp23) tl.store(out_ptr2 + (r1 + 16 * x0), tmp25, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0 , in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 + tmp24 tmp26 = tl.full([1, 1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = 0.0 tmp29 = tmp27 <= tmp28 tl.store(out_ptr2 + (r1 + 16 * x0), tmp27, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr4 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) get_raw_stream(0) triton_per_fused__native_batch_norm_legit_relu_0[grid(16)](buf0, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_relu_threshold_backward_1[ grid(16)](buf6, primals_1, buf7, buf11, buf12, buf10, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) return buf11, primals_1, primals_2, primals_3, buf0, reinterpret_tensor( buf4, (16,), (1,), 0), buf5, buf6, reinterpret_tensor(buf10, (16,), (1,), 0), buf12, reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf1, (1, 16, 1, 1), (16, 1, 1, 1), 0) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) class BasicBlockNew(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_model='instance'): super(BasicBlockNew, self).__init__() if 'instance' == norm_model: norm_layer = nn.InstanceNorm2d else: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.stride = stride if stride != 1 or inplanes != planes: self.downsample = nn.Sequential(conv1x1(inplanes, planes, stride), norm_layer(planes)) else: self.downsample = downsample def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
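A minimal sanity check, assuming both BasicBlock and BasicBlockNew above are in scope and a CUDA device is available (the compiled call() asserts CUDA tensor strides). InstanceNorm2d carries no affine parameters here, so copying the two conv weights is enough to align the modules; the outputs are expected to agree up to float tolerance:

import torch

torch.manual_seed(0)
eager = BasicBlock(4, 4).cuda().eval()
fused = BasicBlockNew(4, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())   # conv1.weight and conv2.weight
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(eager(x), fused(x), atol=1e-5))  # expected: True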
SebyakinAndrei/MichiGAN
BasicBlock
false
1050
[ "MIT" ]
0
6584c9a106b33096f38e8f5b11d0320f7065fd26
https://github.com/SebyakinAndrei/MichiGAN/tree/6584c9a106b33096f38e8f5b11d0320f7065fd26
import torch import torch.nn as nn import torch.utils.data def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) class Model(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, downsample=None, groups= 1, base_width=64, dilation=1, norm_model='instance'): super().__init__() if 'instance' == norm_model: norm_layer = nn.InstanceNorm2d else: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( 'Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.stride = stride if stride != 1 or inplanes != planes: self.downsample = nn.Sequential(conv1x1(inplanes, planes, stride), norm_layer(planes)) else: self.downsample = downsample def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
BahdanauAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.clone] # Source node to ATen node mapping: # processed_query => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_6/inductor_cache/bg/cbgmsaps4ljzc6rkbd4imsj3jo73tgvkd46dy7obklnnvintmaea.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mv] # Source node to ATen node mapping: # out => mul, sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %primals_5), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mv_1 = async_compile.triton('triton_poi_fused_mv_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mv_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*(x0 // 4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + ((4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (0)) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (1)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp15 = tl.load(in_ptr0 + (2 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + (2)) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp23 = tl.load(in_ptr0 + (3 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (3 + (4*(x0 % 4)) + (16*(x0 // 16))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (3)) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp6 = tmp3 * tmp5 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp13 = tmp10 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp21 = tmp18 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 + tmp24 tmp26 = libdevice.tanh(tmp25) tmp29 = tmp26 * tmp28 tmp30 = tmp22 + tmp29 
tl.store(out_ptr0 + (x0), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hg/chg3iq6bscxmmxv5f7tuzgwycb4mgrimwfhv2nauw5rj4tt5cmv2.py # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_normalized => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/zu/czuvep3dmpmqmhiiliwubh4ghdt2qr27va67sszkua7trziinwov.py # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_normalized => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_query], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_key], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(primals_1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [processed_key], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 
4), 0), out=buf3) del primals_4 buf4 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mv] triton_poi_fused_mv_1.run(buf1, buf3, primals_5, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [scores_normalized], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 16, 1), 0), out=buf7) return (reinterpret_tensor(buf7, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (4, 16, 1), 0), primals_5, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf3, buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn.functional as F from torch import nn from torch.nn.parameter import Parameter import torch.nn.parallel import torch.utils.data import torch.onnx import torch.optim import torch.utils.data.distributed class BahdanauAttention(nn.Module): """ It should be very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=False, dropout=0, batch_first=False): super(BahdanauAttention, self).__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) self.mask = None if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device= context.device) self.mask = indices >= context_len.unsqueeze(1) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = att_query + att_keys if self.normalize: sum_qk = sum_qk + self.normalize_bias tmp = self.linear_att linear_att = tmp / tmp.norm() linear_att = linear_att linear_att = linear_att * self.normalize_scalar else: linear_att = self.linear_att out = F.tanh(sum_qk).matmul(linear_att) return out def forward(self, query, keys): """ :param query: if batch_first: (b x t_q x n) else: (t_q x b x n) :param keys: if batch_first: (b x t_k x n) else (t_k x b x n) :returns: (context, scores_normalized) context: if batch_first: (b x t_q x n) else (t_q x b x n) scores_normalized: if batch_first (b x t_q x t_k) else (t_q x b x t_k) """ if not self.batch_first: keys = keys.transpose(0, 1) if query.dim() == 3: query = query.transpose(0, 1) if query.dim() == 2: single_query = True query = query.unsqueeze(1) else: single_query = False b = query.size(0) t_k = keys.size(1) t_q = query.size(1) processed_query = self.linear_q(query) processed_key = self.linear_k(keys) scores = self.calc_score(processed_query, processed_key) if self.mask is not None: mask = self.mask.unsqueeze(1).expand(b, t_q, t_k) scores.data.masked_fill_(mask, -65504.0) scores_normalized = F.softmax(scores, dim=-1) scores_normalized = self.dropout(scores_normalized) context = torch.bmm(scores_normalized, keys) if single_query: context = context.squeeze(1) scores_normalized = scores_normalized.squeeze(1) elif not self.batch_first: context = context.transpose(0, 1) scores_normalized = scores_normalized.transpose(0, 1) 
return context, scores_normalized def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'query_size': 4, 'key_size': 4, 'num_units': 4}]
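A minimal usage sketch for the module above, assuming BahdanauAttention is in scope; with batch_first=False the inputs are (t, b, n), which matches the (4, 4, 4) tensors from get_inputs():

import torch

attn = BahdanauAttention(query_size=4, key_size=4, num_units=4)
query, keys = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
context, scores = attn(query, keys)
print(context.shape, scores.shape)  # torch.Size([4, 4, 4]) torch.Size([4, 4, 4])
print(scores.sum(dim=-1))           # softmax rows sum to 1 over t_k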
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn.functional as F from torch import nn from torch.nn.parameter import Parameter import torch.nn.parallel import torch.utils.data import torch.onnx import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_mv_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * (x0 // 4), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + (4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + 1) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp15 = tl.load(in_ptr0 + (2 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (2 + 4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr2 + 2) tmp20 = tl.broadcast_to(tmp19, [XBLOCK]) tmp23 = tl.load(in_ptr0 + (3 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr1 + (3 + 4 * (x0 % 4) + 16 * (x0 // 16)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + 3) tmp28 = tl.broadcast_to(tmp27, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp6 = tmp3 * tmp5 tmp9 = tmp7 + tmp8 tmp10 = libdevice.tanh(tmp9) tmp13 = tmp10 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 + tmp16 tmp18 = libdevice.tanh(tmp17) tmp21 = tmp18 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 + tmp24 tmp26 = libdevice.tanh(tmp25) tmp29 = tmp26 * tmp28 tmp30 = tmp22 + tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, 
tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](primals_1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused_mv_1[grid(64)](buf1, buf3, primals_5, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused__softmax_3[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(primals_1, (4, 4, 4), ( 4, 16, 1), 0), out=buf7) return reinterpret_tensor(buf7, (4, 4, 4), (4, 16, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4), (4, 16, 1), 0 ), primals_5, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (16, 4), (4, 1), 0 ), buf3, buf6, reinterpret_tensor(primals_1, (4, 4, 4), (4, 1, 16), 0) class BahdanauAttentionNew(nn.Module): """ It should be very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=False, dropout=0, batch_first=False): super(BahdanauAttentionNew, self).__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) self.mask = None if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def 
reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device= context.device) self.mask = indices >= context_len.unsqueeze(1) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = att_query + att_keys if self.normalize: sum_qk = sum_qk + self.normalize_bias tmp = self.linear_att linear_att = tmp / tmp.norm() linear_att = linear_att linear_att = linear_att * self.normalize_scalar else: linear_att = self.linear_att out = F.tanh(sum_qk).matmul(linear_att) return out def forward(self, input_0, input_1): primals_5 = self.linear_att primals_3 = self.linear_q.weight primals_4 = self.linear_k.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
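A minimal comparison sketch, assuming both classes above are in scope and CUDA is available; the compiled forward returns (context, scores_normalized) like the eager one, so with shared parameters the outputs are expected to agree up to float tolerance:

import torch

torch.manual_seed(0)
eager = BahdanauAttention(4, 4, 4).cuda().eval()
fused = BahdanauAttentionNew(4, 4, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())   # linear_q, linear_k, linear_att
q = torch.rand(4, 4, 4, device='cuda')
k = torch.rand(4, 4, 4, device='cuda')
ctx_a, sc_a = eager(q, k)
ctx_b, sc_b = fused(q, k)
print(torch.allclose(ctx_a, ctx_b, atol=1e-5), torch.allclose(sc_a, sc_b, atol=1e-5))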
SerailHydra/examples
BahdanauAttention
false
1051
[ "BSD-3-Clause" ]
0
547226ff28032d4dab1dbf26e0b5f8b8276d79ae
https://github.com/SerailHydra/examples/tree/547226ff28032d4dab1dbf26e0b5f8b8276d79ae
import math import torch import torch.nn.functional as F from torch import nn from torch.nn.parameter import Parameter import torch.nn.parallel import torch.utils.data import torch.onnx import torch.optim import torch.utils.data.distributed class Model(nn.Module): """ It should be very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=False, dropout=0, batch_first=False): super().__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) self.mask = None if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device= context.device) self.mask = indices >= context_len.unsqueeze(1) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = att_query + att_keys if self.normalize: sum_qk = sum_qk + self.normalize_bias tmp = self.linear_att linear_att = tmp / tmp.norm() linear_att = linear_att linear_att = linear_att * self.normalize_scalar else: linear_att = self.linear_att out = F.tanh(sum_qk).matmul(linear_att) return out def forward(self, query, keys): """ :param query: if batch_first: (b x t_q x n) else: (t_q x b x n) :param keys: if batch_first: (b x t_k x n) else (t_k x b x n) :returns: (context, scores_normalized) context: if batch_first: (b x t_q x n) else (t_q x b x n) scores_normalized: if batch_first (b x t_q x t_k) else (t_q x b x t_k) """ if not self.batch_first: keys = keys.transpose(0, 1) if query.dim() == 3: query = query.transpose(0, 1) if query.dim() == 2: single_query = True query = query.unsqueeze(1) else: single_query = False b = query.size(0) t_k = keys.size(1) t_q = query.size(1) processed_query = self.linear_q(query) processed_key = self.linear_k(keys) scores = self.calc_score(processed_query, processed_key) if self.mask is not None: mask = self.mask.unsqueeze(1).expand(b, t_q, t_k) scores.data.masked_fill_(mask, -65504.0) scores_normalized = F.softmax # ... truncated (>4000 chars) for memory efficiency
StateActionEmbedding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/t4/ct4abpxorr7ywpffawu2dyuam53y6icpnxs5d4cvirqopww6wnuh.py # Topologically Sorted Source Nodes: [concatinated_batch], Original ATen: [aten.cat] # Source node to ATen node mapping: # concatinated_batch => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 8 x0 = xindex % 4 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [concatinated_batch], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
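The fused cat kernel above is pure index arithmetic over the flattened (4, 4, 8, 4) output: x1 walks the concatenated dimension, so the first four slots read from arg0_1 and the last four from arg1_1 at offset x1 - 4. A minimal eager-mode sketch (hypothetical, not part of the record) that re-derives the same indexing and checks it against torch.cat:

import torch

a = torch.rand(4, 4, 4, 4)
b = torch.rand(4, 4, 4, 4)
out = torch.empty(4, 4, 8, 4)
flat_a, flat_b, flat_out = a.reshape(-1), b.reshape(-1), out.reshape(-1)
for x3 in range(512):          # xnumel = 512 = 4 * 4 * 8 * 4
    x0 = x3 % 4                # innermost (width) index
    x1 = x3 // 4 % 8           # concatenated dim-2 index
    x2 = x3 // 32              # fused batch/channel index
    if x1 < 4:                 # first half comes from the first input
        flat_out[x3] = flat_a[x0 + 4 * x1 + 16 * x2]
    else:                      # second half reads the second input at x1 - 4
        flat_out[x3] = flat_b[x0 + 4 * (x1 - 4) + 16 * x2]
assert torch.equal(out, torch.cat([a, b], dim=2))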
import math import torch import numpy as np from abc import ABC from abc import abstractmethod from abc import abstractproperty from torch import nn from enum import Enum def tensor_to_numpy(tensor): return tensor.detach().cpu().numpy() class MLPParamHandler(ABC): def __init__(self) ->None: """Interface for parameter handler. For Algorithms that require data on past model parameters this module handels this data such that is it is in the right format according to the specific algorithm. """ super().__init__() @abstractmethod def get_policy_replay_data(self, model: 'torch.nn.Module'): """Function to extract replay data from a module in a format that it can be saved to the replay buffer. Args: model (torch.nn.Module): Module to extract data from. """ ... @abstractmethod def get_policy_critic_data(self, model: 'torch.nn.Module'): """Function to extract data from a policy that the critic requires to evaluate it. Args: model (torch.nn.Module): Module to extract data from. """ ... @abstractmethod def format_replay_buffer_data(self, **kwargs): """Function to format data from the replay buffer such that it can be used as input to the critic afterwards. """ ... @abstractproperty def replay_data_keys(self): """Keys used to save data to the replay buffer. """ ... @abstractproperty def replay_data_info(self): """Description of the data needed to initialize the replay buffer. """ ... class FlatParamHandler(MLPParamHandler): def __init__(self, example_policy: 'torch.nn.Module') ->None: """Parameter handler that simply takes all parameters flattens them and saves them in one vector. Args: example_policy (torch.nn.Module): Example policy network to acquire shape of data. """ super().__init__() self._replay_data_keys = [constants.DATA_PARAMETERS] self._replay_data_info = {self._replay_data_keys[0]: {'shape': self .get_policy_critic_data(example_policy).shape}} def get_policy_replay_data(self, model: 'torch.nn.Module'): return {self._replay_data_keys[0]: tensor_to_numpy(torch.nn.utils. parameters_to_vector(model.parameters())).reshape(1, -1)} def get_policy_critic_data(self, model: 'torch.nn.Module'): return torch.nn.utils.parameters_to_vector(model.parameters()).reshape( 1, -1) def format_replay_buffer_data(self, **kwargs): return kwargs[self._replay_data_keys[0]] @property def replay_data_keys(self): return self._replay_data_keys @property def replay_data_info(self): return self._replay_data_info class NamedParamHandler(MLPParamHandler): def __init__(self, example_policy: 'torch.nn.Module') ->None: """Parameter handler that saves parameters in a dictionary shape such that the parameters are saved in a similar format of how they are used in the actual module. Useful if the Parameters are later reused similarly to how the are used within the module they are extracted from. Args: example_policy (torch.nn.Module): Example policy network to acquire structure of module and according dictionary. """ super().__init__() actor_parameter_dict = self.get_policy_critic_data(example_policy) self._replay_data_keys = actor_parameter_dict.keys() self._replay_data_info = {key: {'shape': actor_parameter_dict[key]. 
shape[1:]} for key in self._replay_data_keys} def get_policy_replay_data(self, model: 'torch.nn.Module'): batched_param_dict = self.get_policy_critic_data(model) return {key: tensor_to_numpy(value) for key, value in batched_param_dict.items()} def get_policy_critic_data(self, model: 'torch.nn.Module'): param_dict = dict(model.named_parameters()) return {key: torch.unsqueeze(tensor, dim=0) for key, tensor in param_dict.items()} def format_replay_buffer_data(self, **kwargs): return {key: kwargs[key] for key in self._replay_data_keys} @property def replay_data_info(self): return self._replay_data_info @property def replay_data_keys(self): return self._replay_data_keys class StateActionHandler(MLPParamHandler): def __init__(self, num_state_action_pairs: 'int', episode_length: 'int', rollout_handler: 'RolloutHandler') ->None: """Parameter handler that does not actually use parameters but rather state action pairs as representation of policies. Args: num_state_action_pairs (int): Number of state action pairs used as a representation for a policy. episode_length (int): Maximal time steps of a episode used for representation purposes. rollout_handler (RolloutHandler): Rollout handler used to execute rollouts when needed. """ super().__init__() self.rollout_handler = rollout_handler self.num_state_action_pairs = num_state_action_pairs self.episode_length = episode_length self._replay_data_keys = [constants.DATA_OBSERVATIONS, constants. DATA_ACTIONS] self._replay_data_info = {constants.DATA_OBSERVATIONS: {'shape': ( episode_length, *rollout_handler.environment_handler. exploration_environment.observation_space.shape)}, constants. DATA_ACTIONS: {'shape': (episode_length, *rollout_handler. environment_handler.exploration_environment.action_space.shape)}} def get_policy_replay_data(self, model: 'torch.nn.Module'): return {} def get_policy_critic_data(self, model: 'torch.nn.Module'): rollout_data = self.rollout_handler.update_rollout(policy=model, extraction_keys=[constants.DATA_OBSERVATIONS, constants. DATA_ACTIONS]) sampled_states, sampeled_actions = self.format_replay_buffer_data(** rollout_data) return sampled_states, sampeled_actions def format_replay_buffer_data(self, **kwargs): states = kwargs[constants.DATA_OBSERVATIONS] actions = kwargs[constants.DATA_ACTIONS] sampled_states, sampeled_actions = self._sample_state_action_paris( states, actions) return sampled_states, sampeled_actions def _sample_state_action_paris(self, states, actions): """To make sure the number of state actions paris is always the same, this function sub samples the desired amount from a state action batch. This also acts as a kind of data augmentation as the representation of a single policy will consist of different state action pairs if called multiple times. Args: states (np.ndarray): Batch of states to sub sample from. actions (np.ndarray): Batch of actions (according to states) to sum sample from. Returns: sampled_states (np.ndarray): Sub sampled states sampeled_actions (np.ndarray): Sub sampled actions """ sample_id = np.random.choice(range(self.episode_length), size=self. 
num_state_action_pairs, replace=False) sampled_states = states[:, sample_id] sampeled_actions = actions[:, sample_id] return sampled_states, sampeled_actions @property def replay_data_info(self): return self._replay_data_info @property def replay_data_keys(self): return self._replay_data_keys class ParameterFormat(Enum): FlatParameters = FlatParamHandler NamedParameters = NamedParamHandler StateAction = StateActionHandler class MLPEmbeddingNetwork(nn.Module): def __init__(self): super(MLPEmbeddingNetwork, self).__init__() @abstractproperty def embedding_size(self) ->int: ... @abstractproperty def input_type(self) ->ParameterFormat: ... class StateActionEmbedding(MLPEmbeddingNetwork): def __init__(self, num_state_action_pairs: 'int', observation_space: 'gym.Space', action_space: 'gym.Space'): super(StateActionEmbedding, self).__init__() self.observation_shape = observation_space.shape self.action_shape = action_space.shape self.num_state_action_pairs = num_state_action_pairs self._embedding_size = num_state_action_pairs * (math.prod(self. observation_shape) + math.prod(self.action_shape)) def forward(self, states, actions): concatinated_batch = torch.cat([states, actions], dim=2) return concatinated_batch @property def embedding_size(self) ->int: return self._embedding_size @property def input_type(self) ->ParameterFormat: return ParameterFormat.StateAction def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_state_action_pairs': 4, 'observation_space': torch. rand([4, 4]), 'action_space': torch.rand([4, 4])}]
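As a quick worked example of the embedding-size arithmetic above (hedged: the get_init_inputs stub passes plain tensors where gym spaces are expected, which works because only .shape is read):

import math
import torch

obs_space, act_space = torch.rand(4, 4), torch.rand(4, 4)
# num_state_action_pairs * (prod(obs_shape) + prod(act_shape)) = 4 * (16 + 16)
size = 4 * (math.prod(obs_space.shape) + math.prod(act_space.shape))
print(size)  # 128
# forward is a plain concatenation along dim 2, so two (4, 4, 4, 4) batches
# yield a (4, 4, 8, 4) embedding tensor.
states, actions = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(torch.cat([states, actions], dim=2).shape)  # torch.Size([4, 4, 8, 4])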
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np from abc import ABC from abc import abstractmethod from abc import abstractproperty from torch import nn from enum import Enum assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def tensor_to_numpy(tensor): return tensor.detach().cpu().numpy() class MLPParamHandler(ABC): def __init__(self) ->None: """Interface for parameter handler. For Algorithms that require data on past model parameters this module handels this data such that is it is in the right format according to the specific algorithm. """ super().__init__() @abstractmethod def get_policy_replay_data(self, model: 'torch.nn.Module'): """Function to extract replay data from a module in a format that it can be saved to the replay buffer. Args: model (torch.nn.Module): Module to extract data from. """ ... @abstractmethod def get_policy_critic_data(self, model: 'torch.nn.Module'): """Function to extract data from a policy that the critic requires to evaluate it. Args: model (torch.nn.Module): Module to extract data from. """ ... @abstractmethod def format_replay_buffer_data(self, **kwargs): """Function to format data from the replay buffer such that it can be used as input to the critic afterwards. """ ... @abstractproperty def replay_data_keys(self): """Keys used to save data to the replay buffer. """ ... @abstractproperty def replay_data_info(self): """Description of the data needed to initialize the replay buffer. """ ... class FlatParamHandler(MLPParamHandler): def __init__(self, example_policy: 'torch.nn.Module') ->None: """Parameter handler that simply takes all parameters flattens them and saves them in one vector. Args: example_policy (torch.nn.Module): Example policy network to acquire shape of data. """ super().__init__() self._replay_data_keys = [constants.DATA_PARAMETERS] self._replay_data_info = {self._replay_data_keys[0]: {'shape': self .get_policy_critic_data(example_policy).shape}} def get_policy_replay_data(self, model: 'torch.nn.Module'): return {self._replay_data_keys[0]: tensor_to_numpy(torch.nn.utils. 
parameters_to_vector(model.parameters())).reshape(1, -1)} def get_policy_critic_data(self, model: 'torch.nn.Module'): return torch.nn.utils.parameters_to_vector(model.parameters()).reshape( 1, -1) def format_replay_buffer_data(self, **kwargs): return kwargs[self._replay_data_keys[0]] @property def replay_data_keys(self): return self._replay_data_keys @property def replay_data_info(self): return self._replay_data_info class NamedParamHandler(MLPParamHandler): def __init__(self, example_policy: 'torch.nn.Module') ->None: """Parameter handler that saves parameters in a dictionary shape such that the parameters are saved in a similar format of how they are used in the actual module. Useful if the Parameters are later reused similarly to how the are used within the module they are extracted from. Args: example_policy (torch.nn.Module): Example policy network to acquire structure of module and according dictionary. """ super().__init__() actor_parameter_dict = self.get_policy_critic_data(example_policy) self._replay_data_keys = actor_parameter_dict.keys() self._replay_data_info = {key: {'shape': actor_parameter_dict[key]. shape[1:]} for key in self._replay_data_keys} def get_policy_replay_data(self, model: 'torch.nn.Module'): batched_param_dict = self.get_policy_critic_data(model) return {key: tensor_to_numpy(value) for key, value in batched_param_dict.items()} def get_policy_critic_data(self, model: 'torch.nn.Module'): param_dict = dict(model.named_parameters()) return {key: torch.unsqueeze(tensor, dim=0) for key, tensor in param_dict.items()} def format_replay_buffer_data(self, **kwargs): return {key: kwargs[key] for key in self._replay_data_keys} @property def replay_data_info(self): return self._replay_data_info @property def replay_data_keys(self): return self._replay_data_keys class StateActionHandler(MLPParamHandler): def __init__(self, num_state_action_pairs: 'int', episode_length: 'int', rollout_handler: 'RolloutHandler') ->None: """Parameter handler that does not actually use parameters but rather state action pairs as representation of policies. Args: num_state_action_pairs (int): Number of state action pairs used as a representation for a policy. episode_length (int): Maximal time steps of a episode used for representation purposes. rollout_handler (RolloutHandler): Rollout handler used to execute rollouts when needed. """ super().__init__() self.rollout_handler = rollout_handler self.num_state_action_pairs = num_state_action_pairs self.episode_length = episode_length self._replay_data_keys = [constants.DATA_OBSERVATIONS, constants. DATA_ACTIONS] self._replay_data_info = {constants.DATA_OBSERVATIONS: {'shape': ( episode_length, *rollout_handler.environment_handler. exploration_environment.observation_space.shape)}, constants. DATA_ACTIONS: {'shape': (episode_length, *rollout_handler. environment_handler.exploration_environment.action_space.shape)}} def get_policy_replay_data(self, model: 'torch.nn.Module'): return {} def get_policy_critic_data(self, model: 'torch.nn.Module'): rollout_data = self.rollout_handler.update_rollout(policy=model, extraction_keys=[constants.DATA_OBSERVATIONS, constants. 
DATA_ACTIONS]) sampled_states, sampeled_actions = self.format_replay_buffer_data(** rollout_data) return sampled_states, sampeled_actions def format_replay_buffer_data(self, **kwargs): states = kwargs[constants.DATA_OBSERVATIONS] actions = kwargs[constants.DATA_ACTIONS] sampled_states, sampeled_actions = self._sample_state_action_paris( states, actions) return sampled_states, sampeled_actions def _sample_state_action_paris(self, states, actions): """To make sure the number of state actions paris is always the same, this function sub samples the desired amount from a state action batch. This also acts as a kind of data augmentation as the representation of a single policy will consist of different state action pairs if called multiple times. Args: states (np.ndarray): Batch of states to sub sample from. actions (np.ndarray): Batch of actions (according to states) to sum sample from. Returns: sampled_states (np.ndarray): Sub sampled states sampeled_actions (np.ndarray): Sub sampled actions """ sample_id = np.random.choice(range(self.episode_length), size=self. num_state_action_pairs, replace=False) sampled_states = states[:, sample_id] sampeled_actions = actions[:, sample_id] return sampled_states, sampeled_actions @property def replay_data_info(self): return self._replay_data_info @property def replay_data_keys(self): return self._replay_data_keys class ParameterFormat(Enum): FlatParameters = FlatParamHandler NamedParameters = NamedParamHandler StateAction = StateActionHandler class MLPEmbeddingNetwork(nn.Module): def __init__(self): super(MLPEmbeddingNetwork, self).__init__() @abstractproperty def embedding_size(self) ->int: ... @abstractproperty def input_type(self) ->ParameterFormat: ... class StateActionEmbeddingNew(MLPEmbeddingNetwork): def __init__(self, num_state_action_pairs: 'int', observation_space: 'gym.Space', action_space: 'gym.Space'): super(StateActionEmbeddingNew, self).__init__() self.observation_shape = observation_space.shape self.action_shape = action_space.shape self.num_state_action_pairs = num_state_action_pairs self._embedding_size = num_state_action_pairs * (math.prod(self. observation_shape) + math.prod(self.action_shape)) @property def embedding_size(self) ->int: return self._embedding_size @property def input_type(self) ->ParameterFormat: return ParameterFormat.StateAction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
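A hedged usage sketch for the compiled entry point above (assumes this file's call() and kernel are in scope and a CUDA device is available); the compiled concatenation should agree exactly with eager torch.cat:

import torch

if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    out, = call([a, b])  # call() empties the argument list it is given
    assert torch.equal(out, torch.cat([a, b], dim=2))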
Sebastian-Griesbach/Improving-Policy-Conditioned-Value-Functions
StateActionEmbedding
false
1052
[ "MIT" ]
0
ec4125c5e056753e507df0406fcd60b6b6c3dc25
https://github.com/Sebastian-Griesbach/Improving-Policy-Conditioned-Value-Functions/tree/ec4125c5e056753e507df0406fcd60b6b6c3dc25
import math import torch import numpy as np from abc import ABC from abc import abstractmethod from abc import abstractproperty from torch import nn from enum import Enum def tensor_to_numpy(tensor): return tensor.detach().cpu().numpy() class MLPParamHandler(ABC): def __init__(self) ->None: """Interface for parameter handler. For Algorithms that require data on past model parameters this module handels this data such that is it is in the right format according to the specific algorithm. """ super().__init__() @abstractmethod def get_policy_replay_data(self, model: 'torch.nn.Module'): """Function to extract replay data from a module in a format that it can be saved to the replay buffer. Args: model (torch.nn.Module): Module to extract data from. """ ... @abstractmethod def get_policy_critic_data(self, model: 'torch.nn.Module'): """Function to extract data from a policy that the critic requires to evaluate it. Args: model (torch.nn.Module): Module to extract data from. """ ... @abstractmethod def format_replay_buffer_data(self, **kwargs): """Function to format data from the replay buffer such that it can be used as input to the critic afterwards. """ ... @abstractproperty def replay_data_keys(self): """Keys used to save data to the replay buffer. """ ... @abstractproperty def replay_data_info(self): """Description of the data needed to initialize the replay buffer. """ ... class FlatParamHandler(MLPParamHandler): def __init__(self, example_policy: 'torch.nn.Module') ->None: """Parameter handler that simply takes all parameters flattens them and saves them in one vector. Args: example_policy (torch.nn.Module): Example policy network to acquire shape of data. """ super().__init__() self._replay_data_keys = [constants.DATA_PARAMETERS] self._replay_data_info = {self._replay_data_keys[0]: {'shape': self .get_policy_critic_data(example_policy).shape}} def get_policy_replay_data(self, model: 'torch.nn.Module'): return {self._replay_data_keys[0]: tensor_to_numpy(torch.nn.utils. parameters_to_vector(model.parameters())).reshape(1, -1)} def get_policy_critic_data(self, model: 'torch.nn.Module'): return torch.nn.utils.parameters_to_vector(model.parameters()).reshape( 1, -1) def format_replay_buffer_data(self, **kwargs): return kwargs[self._replay_data_keys[0]] @property def replay_data_keys(self): return self._replay_data_keys @property def replay_data_info(self): return self._replay_data_info class NamedParamHandler(MLPParamHandler): def __init__(self, example_policy: 'torch.nn.Module') ->None: """Parameter handler that saves parameters in a dictionary shape such that the parameters are saved in a similar format of how they are used in the actual module. Useful if the Parameters are later reused similarly to how the are used within the module they are extracted from. Args: example_policy (torch.nn.Module): Example policy network to acquire structure of module and according dictionary. """ super().__init__() actor_parameter_dict = self.get_policy_critic_data(example_policy) self._replay_data_keys = actor_parameter_dict.keys() self._replay_data_info = {key: {'shape': actor_parameter_dict[key]. shape[1:]} for key in self._replay_data_keys} def get_policy_replay_data(self, model: 'torch.nn.Module'): batched_param_dict = self.get_policy_critic_data(model) return {key: tensor_to_numpy(value) for key, value in batched_param_dict.items()} def get_policy_critic_data(self, model: 'torch.nn.Module'): para # ... truncated (>4000 chars) for memory efficiency
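The sub-sampling trick used by StateActionHandler above is easy to isolate; a minimal NumPy sketch (hypothetical shapes) showing that drawing one index set keeps states and actions aligned:

import numpy as np

episode_length, num_pairs = 16, 4
states = np.random.rand(2, episode_length, 8)   # (batch, time, obs_dim)
actions = np.random.rand(2, episode_length, 3)  # (batch, time, act_dim)
# distinct timesteps, reused for both arrays so the pairs stay matched
idx = np.random.choice(episode_length, size=num_pairs, replace=False)
sampled_states, sampled_actions = states[:, idx], actions[:, idx]
print(sampled_states.shape, sampled_actions.shape)  # (2, 4, 8) (2, 4, 3)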
GatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/mi/cmi6pvfnxxh5g6vyzi42gzjlvkcallq3paz67ydqnujyhpolr3bm.py # Topologically Sorted Source Nodes: [x, mask, sigmoid, x_1, x_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.elu] # Source node to ATen node mapping: # mask => convolution_1 # sigmoid => sigmoid # x => convolution # x_1 => mul # x_2 => expm1, gt, mul_1, mul_3, where # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_1, %mul_3), kwargs = {}) triton_poi_fused_convolution_elu_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_elu_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_elu_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 9) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 1.0 tmp11 = tmp7 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = tmp12 * tmp10 tmp14 = tl.where(tmp9, tmp11, tmp13) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(in_out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr0 + (x3), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0; del buf0 # reuse buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x, mask, sigmoid, x_1, x_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.elu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_elu_mul_sigmoid_0.run(buf1, buf3, primals_2, primals_5, buf4, 144, grid=grid(144), stream=stream0) del primals_2 del primals_5 return (buf4, primals_1, primals_3, primals_4, buf1, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
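The extern convolutions above leave only an elementwise tail for the fused kernel: per-channel bias adds, a sigmoid gate, and ELU(alpha=1.0) written as where(y > 0, y, expm1(y)). A minimal eager sketch (not part of the record) of that tail, checked against torch.nn.functional.elu:

import torch

def gated_elu_tail(x_conv, mask_conv, x_bias, mask_bias):
    x = x_conv + x_bias.view(1, -1, 1, 1)         # bias add for the feature conv
    m = mask_conv + mask_bias.view(1, -1, 1, 1)   # bias add for the gate conv
    y = x * torch.sigmoid(m)                      # sigmoid gating
    return torch.where(y > 0, y, torch.expm1(y))  # ELU with alpha = 1.0

x_conv, mask_conv = torch.randn(4, 4, 3, 3), torch.randn(4, 4, 3, 3)
x_b, m_b = torch.randn(4), torch.randn(4)
ref = torch.nn.functional.elu(
    (x_conv + x_b.view(1, -1, 1, 1)) * torch.sigmoid(mask_conv + m_b.view(1, -1, 1, 1)))
assert torch.allclose(gated_elu_tail(x_conv, mask_conv, x_b, m_b), ref)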
import torch import torch.nn as nn class GatedConv2d(torch.nn.Module): """ Gated Convolution layer with activation (default activation: ELU) Params: same as conv2d Input: The feature from last layer "I" Output:\\phi(f(I))*\\sigmoid(g(I)) """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, activation=torch.nn.ELU(1.0, inplace=True), dropout=0, gate_type='regular_conv'): super(GatedConv2d, self).__init__() self.stride = stride padding = dilation * (kernel_size - 1) // 2 self.activation = activation self.conv2d = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, bias=bias) if gate_type == 'regular_conv': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation, bias=bias) elif gate_type == 'single_channel': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) elif gate_type == 'pixel_wise': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, dilation=dilation, bias=bias) elif gate_type == 'depth_separable': self.mask_conv2d = nn.Sequential(nn.Conv2d(in_channels= in_channels, out_channels=in_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation= dilation, bias=bias, groups=in_channels), nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, padding=0, bias=bias)) self.sigmoid = nn.Sigmoid() if dropout > 0: self.dropout = nn.Dropout(dropout) else: self.dropout = None for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) def forward(self, input): x = self.conv2d(input) mask = self.mask_conv2d(input) x = x * self.sigmoid(mask) if self.activation is not None: x = self.activation(x) if self.dropout: x = self.dropout(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_elu_mul_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 1.0 tmp11 = tmp7 * tmp10 tmp12 = libdevice.expm1(tmp11) tmp13 = tmp12 * tmp10 tmp14 = tl.where(tmp9, tmp11, tmp13) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(in_out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr0 + x3, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0 del buf0 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_elu_mul_sigmoid_0[grid(144)](buf1, buf3, primals_2, primals_5, buf4, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 return buf4, primals_1, primals_3, primals_4, buf1, buf3, buf4 class GatedConv2dNew(torch.nn.Module): """ Gated Convlution layer with activation (default activation:LeakyReLU) Params: same as conv2d Input: The feature from last layer "I" Output:\\phi(f(I))*\\sigmoid(g(I)) """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, activation=torch.nn.ELU(1.0, inplace=True), dropout=0, gate_type='regular_conv'): super(GatedConv2dNew, self).__init__() self.stride = stride padding = dilation * (kernel_size - 1) // 2 self.activation = activation self.conv2d = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, bias=bias) if gate_type == 'regular_conv': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation, bias=bias) elif gate_type == 'single_channel': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=kernel_size, 
stride=stride, padding=padding, dilation=dilation, bias=bias) elif gate_type == 'pixel_wise': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, dilation=dilation, bias=bias) elif gate_type == 'depth_separable': self.mask_conv2d = nn.Sequential(nn.Conv2d(in_channels= in_channels, out_channels=in_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation= dilation, bias=bias, groups=in_channels), nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, padding=0, bias=bias)) self.sigmoid = nn.Sigmoid() if dropout > 0: self.dropout = nn.Dropout(dropout) else: self.dropout = None for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) def forward(self, input_0): primals_1 = self.conv2d.weight primals_2 = self.conv2d.bias primals_4 = self.mask_conv2d.weight primals_5 = self.mask_conv2d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
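A hedged sanity check for the wrapper above (assumes a CUDA device and that both GatedConv2d from the source listing and GatedConv2dNew are in scope): with shared weights, the compiled module should reproduce the eager forward pass:

import torch

if torch.cuda.is_available():
    compiled_mod = GatedConv2dNew(4, 4, 4).cuda()
    eager_mod = GatedConv2d(4, 4, 4).cuda()
    eager_mod.load_state_dict(compiled_mod.state_dict())  # share weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(compiled_mod(x), eager_mod(x), atol=1e-6)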
ShiraLightricks/3d-photo-inpainting
GatedConv2d
false
1053
[ "MIT" ]
0
c42ac41576690b765e50f5281ddbfb58439ff36d
https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d
import torch import torch.nn as nn class Model(torch.nn.Module): """ Gated Convolution layer with activation (default activation: ELU) Params: same as conv2d Input: The feature from last layer "I" Output:\\phi(f(I))*\\sigmoid(g(I)) """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, activation=torch.nn.ELU(1.0, inplace=True), dropout=0, gate_type='regular_conv'): super().__init__() self.stride = stride padding = dilation * (kernel_size - 1) // 2 self.activation = activation self.conv2d = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding, dilation=dilation, bias=bias) if gate_type == 'regular_conv': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation, bias=bias) elif gate_type == 'single_channel': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) elif gate_type == 'pixel_wise': self.mask_conv2d = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=0, dilation=dilation, bias=bias) elif gate_type == 'depth_separable': self.mask_conv2d = nn.Sequential(nn.Conv2d(in_channels= in_channels, out_channels=in_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation= dilation, bias=bias, groups=in_channels), nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, padding=0, bias=bias)) self.sigmoid = nn.Sigmoid() if dropout > 0: self.dropout = nn.Dropout(dropout) else: self.dropout = None for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) def forward(self, input): x = self.conv2d(input) mask = self.mask_conv2d(input) x = x * self.sigmoid(mask) if self.activation is not None: x = self.activation(x) if self.dropout: x = self.dropout(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
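The four gate_type options trade gate expressiveness against parameters. A short sketch (assuming the Model class above is in scope) comparing the gate branch's parameter count for the record's 4-in/4-out, kernel-4 configuration:

import torch

for gate_type in ['regular_conv', 'single_channel', 'pixel_wise', 'depth_separable']:
    m = Model(4, 4, 4, gate_type=gate_type)
    n = sum(p.numel() for p in m.mask_conv2d.parameters())
    print(gate_type, n)
# regular_conv: 260 (4*4*4*4 + 4), single_channel: 65 (1*4*4*4 + 1),
# pixel_wise: 20 (4*4*1*1 + 4), depth_separable: 88 (68 depthwise + 20 pointwise)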
CoordConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nz/cnzr3enannjni75kec3qorz6jm6lyd5whz6u5l3ih55bgihwnb2u.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat] # Source node to ATen node mapping: # ret => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %device_put, %device_put_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 6 x3 = (xindex // 96) x4 = xindex % 16 x1 = (xindex // 4) % 4 x0 = xindex % 4 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = x1 tmp11 = tmp10.to(tl.float32) tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = 2.0 tmp15 = tmp13 * tmp14 tmp16 = 1.0 tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tmp21 = tl.full([1], 6, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = x0 tmp24 = tmp23.to(tl.float32) tmp25 = tmp24 * tmp12 tmp26 = tmp25 * tmp14 tmp27 = tmp26 - tmp16 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tl.where(tmp9, tmp19, tmp29) tmp31 = tl.where(tmp4, tmp5, tmp30) tl.store(out_ptr0 + (x5), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # ret_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 
6, 4, 4), (96, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 6, 1, 1), (6, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
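The two tl.where branches above inline the coordinate channels instead of materializing them: for a 4x4 map, channel 4 holds the row coordinate and channel 5 the column coordinate, each mapped from {0, 1, 2, 3} to [-1, 1]; the kernel's 0.3333... constant is 1 / (x_dim - 1). A small sketch (not part of the record) of that mapping:

import torch

idx = torch.arange(4, dtype=torch.float32)
coords = idx / 3 * 2 - 1             # idx / (dim - 1) * 2 - 1
print(coords)                        # tensor([-1.0000, -0.3333,  0.3333,  1.0000])
xx = coords.view(4, 1).expand(4, 4)  # varies along rows (x1 in the kernel)
yy = coords.view(1, 4).expand(4, 4)  # varies along columns (x0 in the kernel)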
import torch import torch.nn as nn class AddCoords(nn.Module): def __init__(self, with_r=False): super().__init__() self.with_r = with_r def forward(self, input_tensor): """ Args: input_tensor: shape(batch, channel, x_dim, y_dim) """ batch_size, _, x_dim, y_dim = input_tensor.size() xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1) yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2) xx_channel = xx_channel.float() / (x_dim - 1) yy_channel = yy_channel.float() / (y_dim - 1) xx_channel = xx_channel * 2 - 1 yy_channel = yy_channel * 2 - 1 xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3) yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3) ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor), yy_channel.type_as(input_tensor)], dim=1) if self.with_r: rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) - 0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2)) ret = torch.cat([ret, rr], dim=1) return ret class CoordConv(nn.Module): def __init__(self, in_channels, out_channels, with_r=False, **kwargs): super().__init__() self.addcoords = AddCoords(with_r=with_r) in_size = in_channels + 2 if with_r: in_size += 1 self.conv = nn.Conv2d(in_size, out_channels, kernel_size=1, stride= 1, padding=0, dilation=1, groups=1, bias=True) def forward(self, x): ret = self.addcoords(x) ret = self.conv(ret) return ret def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 6 x3 = xindex // 96 x4 = xindex % 16 x1 = xindex // 4 % 4 x0 = xindex % 4 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = x1 tmp11 = tmp10.to(tl.float32) tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = 2.0 tmp15 = tmp13 * tmp14 tmp16 = 1.0 tmp17 = tmp15 - tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp9, tmp17, tmp18) tmp20 = tmp0 >= tmp7 tl.full([1], 6, tl.int64) tmp23 = x0 tmp24 = tmp23.to(tl.float32) tmp25 = tmp24 * tmp12 tmp26 = tmp25 * tmp14 tmp27 = tmp26 - tmp16 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tl.where(tmp9, tmp19, tmp29) tmp31 = tl.where(tmp4, tmp5, tmp30) tl.store(out_ptr0 + x5, tmp31, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 6, 1, 1), (6, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class AddCoords(nn.Module): def __init__(self, with_r=False): super().__init__() self.with_r = with_r def forward(self, input_tensor): """ Args: input_tensor: shape(batch, channel, x_dim, y_dim) """ batch_size, _, x_dim, y_dim = input_tensor.size() xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1) yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2) xx_channel = xx_channel.float() / (x_dim - 1) yy_channel = yy_channel.float() / (y_dim - 1) xx_channel = xx_channel * 2 - 1 yy_channel = yy_channel * 2 - 1 xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3) yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3) ret = torch.cat([input_tensor, 
xx_channel.type_as(input_tensor), yy_channel.type_as(input_tensor)], dim=1) if self.with_r: rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) - 0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2)) ret = torch.cat([ret, rr], dim=1) return ret class CoordConvNew(nn.Module): def __init__(self, in_channels, out_channels, with_r=False, **kwargs): super().__init__() self.addcoords = AddCoords(with_r=with_r) in_size = in_channels + 2 if with_r: in_size += 1 self.conv = nn.Conv2d(in_size, out_channels, kernel_size=1, stride= 1, padding=0, dilation=1, groups=1, bias=True) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
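Worth noting: the compiled kernel hard-codes the 4x4 geometry (xnumel = 384 and the 1/(4 - 1) scale), so CoordConvNew is only valid for (4, 4, 4, 4) inputs, while the eager CoordConv handles any spatial size. A hedged equivalence sketch on the supported shape (assumes CUDA and both classes above in scope):

import torch

if torch.cuda.is_available():
    compiled_mod = CoordConvNew(4, 4).cuda()
    eager_mod = CoordConv(4, 4).cuda()
    eager_mod.load_state_dict(compiled_mod.state_dict())  # share the 1x1 conv
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(compiled_mod(x), eager_mod(x), atol=1e-6)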
SeunghwanByun/Real-Time-Road-Detection-Network
CoordConv
false
1054
[ "MIT" ]
0
bc46615adef0e2b1a9a03dd4951559ca5849e6e1
https://github.com/SeunghwanByun/Real-Time-Road-Detection-Network/tree/bc46615adef0e2b1a9a03dd4951559ca5849e6e1
import torch import torch.nn as nn class AddCoords(nn.Module): def __init__(self, with_r=False): super().__init__() self.with_r = with_r def forward(self, input_tensor): """ Args: input_tensor: shape(batch, channel, x_dim, y_dim) """ batch_size, _, x_dim, y_dim = input_tensor.size() xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1) yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2) xx_channel = xx_channel.float() / (x_dim - 1) yy_channel = yy_channel.float() / (y_dim - 1) xx_channel = xx_channel * 2 - 1 yy_channel = yy_channel * 2 - 1 xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3) yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3) ret = torch.cat([input_tensor, xx_channel.type_as(input_tensor), yy_channel.type_as(input_tensor)], dim=1) if self.with_r: rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) - 0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2)) ret = torch.cat([ret, rr], dim=1) return ret class Model(nn.Module): def __init__(self, in_channels, out_channels, with_r=False, **kwargs): super().__init__() self.addcoords = AddCoords(with_r=with_r) in_size = in_channels + 2 if with_r: in_size += 1 self.conv = nn.Conv2d(in_size, out_channels, kernel_size=1, stride= 1, padding=0, dilation=1, groups=1, bias=True) def forward(self, x): ret = self.addcoords(x) ret = self.conv(ret) return ret def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
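The with_r=True path of AddCoords (not exercised by the compiled kernel above) appends one more channel, a radial distance from (0.5, 0.5) measured in the already-normalized coordinates, so in_size becomes in_channels + 3. A small sketch of that channel for the 4x4 case:

import torch

coords = torch.arange(4, dtype=torch.float32) / 3 * 2 - 1
xx = coords.view(4, 1).expand(4, 4)
yy = coords.view(1, 4).expand(4, 4)
rr = torch.sqrt((xx - 0.5) ** 2 + (yy - 0.5) ** 2)  # radial channel
print(rr.shape)  # torch.Size([4, 4])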
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nu/cnuc7ivckuuly7yn2763pwt3sw72jd6vuwpeeu4sfespm5iz7fq4.py # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_score_2 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_score_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 
(4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_score_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3) del arg2_1 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class ScaledDotProductAttention(nn.Module):

    def __init__(self):
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, query, key, value, mask=None):
        _1, _2, query_sequence_length, _3 = query.size()
        batch_size, num_head, key_sequence_length, size_per_head = key.size()
        query = query.view(batch_size, num_head, query_sequence_length,
            size_per_head)
        # Note: view() reinterprets the contiguous buffer rather than
        # transposing, so unless `key` is already stored as
        # (batch, head, size_per_head, seq) this computes Q @ K rather than
        # the usual Q @ K^T; the compiled graphs in this entry reproduce the
        # same behavior. A faithful transpose would be key.transpose(-2, -1).
        key = key.view(batch_size, num_head, size_per_head,
            key_sequence_length)
        attention_score = torch.einsum('abcd, abde -> abce', query, key)
        attention_score = attention_score / math.sqrt(size_per_head)
        if mask is not None:
            attention_score = attention_score.masked_fill(mask == 0,
                -1000000000.0)
        attention_score = F.softmax(attention_score, dim=-1)
        result = attention_score @ value
        return result, attention_score


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
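A minimal smoke test for the eager module above, as a sketch; the shapes and the assertions are assumptions for illustration, not from the repository:

import torch

attn = ScaledDotProductAttention()
q = torch.rand(2, 2, 3, 5)  # (batch, heads, query_len, size_per_head)
k = torch.rand(2, 2, 3, 5)  # (batch, heads, key_len, size_per_head)
v = torch.rand(2, 2, 3, 5)
result, score = attn(q, k, v)
assert result.shape == (2, 2, 3, 5)
assert score.shape == (2, 2, 3, 3)
# Each softmaxed attention row must sum to one.
assert torch.allclose(score.sum(dim=-1), torch.ones(2, 2, 3))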
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3 ) del arg2_1 return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2 class ScaledDotProductAttentionNew(nn.Module): def __init__(self): super(ScaledDotProductAttentionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 
arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
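For completeness, a hedged comparison of the compiled wrapper against the eager module, assuming both classes above are importable together and a CUDA device is available; note that call() is shape-specialized to contiguous (4, 4, 4, 4) float32 inputs and the compiled path supports no mask:

import torch

if torch.cuda.is_available():
    q = torch.rand(4, 4, 4, 4, device='cuda')
    k = torch.rand(4, 4, 4, 4, device='cuda')
    v = torch.rand(4, 4, 4, 4, device='cuda')
    eager = ScaledDotProductAttention()
    compiled = ScaledDotProductAttentionNew()
    out_e, score_e = eager(q, k, v)
    # This graph does not mutate its inputs, so q, k, v stay usable.
    out_c, score_c = compiled(q, k, v)
    print(torch.allclose(out_e, out_c, atol=1e-5),
          torch.allclose(score_e, score_c, atol=1e-5))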
SeungoneKim/Transformer_implementation
ScaledDotProductAttention
false
1,055
[ "Apache-2.0" ]
0
a52bf552eb645fc9bfb812cc26842fc147d6c008
https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008
import math import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
Swish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/lc/clcsmfjtwsnzu2llmpsmvwrt5ojf76ozdv5ttluhi2gtpojjo6lv.py # Topologically Sorted Source Nodes: [sigmoid, mul_], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul_ => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {}) # %copy_ : [num_users=1] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %mul), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr1 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [sigmoid, mul_], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(arg0_1, arg0_1, 256, grid=grid(256), stream=stream0) return (arg0_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Swish(nn.Module): def __init__(self, inplace=True): super(Swish, self).__init__() self.inplace = inplace def forward(self, x): return x.mul_(x.sigmoid()) if self.inplace else x.mul(x.sigmoid()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
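A short sketch (tensor shapes assumed) of the two Swish modes above: inplace=False leaves its input untouched, while inplace=True overwrites it via mul_:

import torch

x = torch.rand(4, 4, 4, 4)
ref = x * x.sigmoid()

out = Swish(inplace=False)(x.clone())
assert torch.allclose(out, ref)

y = x.clone()
Swish(inplace=True)(y)
assert torch.allclose(y, ref)  # y itself was modified in place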
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = tmp0 * tmp1 tl.store(out_ptr1 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) return arg0_1, class SwishNew(nn.Module): def __init__(self, inplace=True): super(SwishNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ShowLo/Networks
Swish
false
1,056
[ "MIT" ]
0
48f8545783966c383b6c3b600fbe37a15ea8ae3c
https://github.com/ShowLo/Networks/tree/48f8545783966c383b6c3b600fbe37a15ea8ae3c
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, inplace=True): super().__init__() self.inplace = inplace def forward(self, x): return x.mul_(x.sigmoid()) if self.inplace else x.mul(x.sigmoid()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
Bicubic
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/j5/cj5es5bvythkhvipgt3xmi2elrhehimxnb5sn3noat3qbbqagqx7.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.floor, aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten._unsafe_index, aten.clamp, aten.rsub] # Source node to ATen node mapping: # interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_10, _unsafe_index_11, _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, _unsafe_index_8, _unsafe_index_9, add, add_10, add_11, add_12, add_13, add_14, add_15, add_16, add_17, add_18, add_19, add_20, add_21, add_22, add_23, add_24, add_25, add_26, add_27, add_28, add_29, add_30, add_6, add_7, add_8, add_9, clamp_max, clamp_max_1, clamp_min, clamp_min_1, convert_element_type_1, floor, floor_1, iota_1, mul, mul_10, mul_11, mul_12, mul_13, mul_14, mul_15, mul_16, mul_17, mul_18, mul_19, mul_2, mul_20, mul_21, mul_22, mul_23, mul_24, mul_25, mul_26, mul_27, mul_28, mul_29, mul_3, mul_30, mul_31, mul_32, mul_33, mul_34, mul_35, mul_36, mul_37, mul_38, mul_39, mul_4, mul_40, mul_41, mul_42, mul_43, mul_44, mul_45, mul_5, mul_6, mul_7, mul_8, mul_9, sub, sub_10, sub_11, sub_12, sub_13, sub_14, sub_15, sub_16, sub_17, sub_18, sub_19, sub_2, sub_20, sub_21, sub_3, sub_6, sub_7, sub_8, sub_9 # Graph fragment: # %floor_1 : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%unsqueeze,), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {}) # %floor : [num_users=2] = 
call_function[target=torch.ops.aten.floor.default](args = (%sub,), kwargs = {}) # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_2, %clamp_max_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %floor), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {}) # %clamp_max_1 : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {}) # %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max_1, 1.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, -0.75), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, -3.75), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %add_6), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, -6.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %add_6), kwargs = {}) # %sub_7 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, -3.0), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index, %sub_7), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 1.25), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 2.25), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %clamp_max_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {}) # %add_8 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %add_8), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_6, %clamp_max_7]), kwargs = {}) # %sub_9 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max_1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, 1.25), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_8, 2.25), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_9), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %sub_9), kwargs = {}) # %add_9 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, 1), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_2, %add_9), kwargs = {}) # %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%add_16, %mul_28), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {}) # %sub_11 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max_1), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, -0.75), kwargs = {}) # %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_11, -3.75), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %sub_11), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, -6.0), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, %sub_11), kwargs = {}) # %sub_13 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_13, -3.0), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_3, %sub_13), kwargs = {}) # %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_29), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %floor_1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {}) # %add_11 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, 1.0), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, -0.75), kwargs = {}) # %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_14, -3.75), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %add_11), kwargs = {}) # %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, -6.0), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, %add_11), kwargs = {}) # %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_16, -3.0), kwargs = {}) # %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_18, %sub_15), kwargs = {}) # %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_10, %clamp_max_11]), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_4, %sub_7), kwargs = {}) # %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_5, %add_8), kwargs = {}) # %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_30, %mul_31), kwargs = {}) # %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_14, %clamp_max_15]), kwargs = {}) # %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_6, %add_9), kwargs = {}) # %add_20 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %mul_32), kwargs = {}) # %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_16, %clamp_max_17]), kwargs = {}) # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_7, %sub_13), kwargs = {}) # %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %mul_33), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 1.25), kwargs = {}) # %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_17, 2.25), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %clamp_max), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, 1), kwargs = {}) # %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, %add_13), kwargs = {}) # %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_42, %mul_43), kwargs = {}) # %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_18, %clamp_max_19]), kwargs = {}) # %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_8, %sub_7), kwargs = {}) # %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_20, %clamp_max_21]), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_9, %add_8), kwargs = {}) # %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_34, %mul_35), kwargs = {}) # %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_22, %clamp_max_23]), kwargs = {}) # %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_10, %add_9), kwargs = {}) # %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_22, %mul_36), kwargs = {}) # %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_24, %clamp_max_25]), kwargs = {}) # %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_11, %sub_13), kwargs = {}) # %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %mul_37), kwargs = {}) # %sub_17 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, 1.25), kwargs = {}) # %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_20, 2.25), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_17), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_21, %sub_17), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, 1), kwargs = {}) # %mul_44 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%add_24, %add_14), kwargs = {}) # %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %mul_44), kwargs = {}) # %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_26, %clamp_max_27]), kwargs = {}) # %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_12, %sub_7), kwargs = {}) # %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_28, %clamp_max_29]), kwargs = {}) # %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_13, %add_8), kwargs = {}) # %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_38, %mul_39), kwargs = {}) # %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_30, %clamp_max_31]), kwargs = {}) # %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_14, %add_9), kwargs = {}) # %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_40), kwargs = {}) # %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max_32, %clamp_max_33]), kwargs = {}) # %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_15, %sub_13), kwargs = {}) # %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %mul_41), kwargs = {}) # %sub_19 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, -0.75), kwargs = {}) # %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_23, -3.75), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_19), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, -6.0), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_15, %sub_19), kwargs = {}) # %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_25, -3.0), kwargs = {}) # %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_27, %sub_21), kwargs = {}) # %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %mul_45), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(in_out_ptr1, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = libdevice.floor(tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 - tmp9 tmp11 = tl.full([1], 0, tl.int64) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tl.full([1], 3, tl.int64) tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = x0 tmp16 = tmp15.to(tl.float32) tmp17 = tmp16 + tmp2 tmp18 = tmp17 * tmp4 tmp19 = tmp18 - tmp2 tmp20 = libdevice.floor(tmp19) tmp21 = tmp20.to(tl.int32) tmp22 = tmp21 - tmp9 tmp23 = triton_helpers.maximum(tmp22, tmp11) tmp24 = triton_helpers.minimum(tmp23, tmp13) tmp25 = tl.load(in_ptr0 + (tmp24 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last') tmp26 = tmp19 - tmp20 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = triton_helpers.minimum(tmp28, tmp4) tmp30 = tmp29 + tmp4 tmp31 = -0.75 tmp32 = tmp30 * tmp31 tmp33 = -3.75 tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp30 tmp36 = -6.0 tmp37 = tmp35 + tmp36 tmp38 = tmp37 * tmp30 tmp39 = -3.0 tmp40 = tmp38 - tmp39 tmp41 = tmp25 * tmp40 tmp42 = triton_helpers.maximum(tmp21, tmp11) tmp43 = triton_helpers.minimum(tmp42, tmp13) tmp44 = tl.load(in_ptr0 + (tmp43 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last') tmp45 = 1.25 tmp46 = tmp29 * tmp45 tmp47 = 2.25 tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp29 tmp50 = tmp49 * tmp29 tmp51 = tmp50 + tmp4 tmp52 = tmp44 * tmp51 tmp53 = tmp21 + tmp9 tmp54 = triton_helpers.maximum(tmp53, tmp11) tmp55 = triton_helpers.minimum(tmp54, tmp13) tmp56 = tl.load(in_ptr0 + (tmp55 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last') tmp57 = tmp4 - tmp29 tmp58 = tmp57 * tmp45 tmp59 = tmp58 - tmp47 tmp60 = tmp59 * tmp57 tmp61 = tmp60 * tmp57 tmp62 = tmp61 + tmp4 tmp63 = tmp56 * tmp62 tmp64 = tl.full([1], 2, tl.int64) tmp65 = tmp21 + tmp64 tmp66 = triton_helpers.maximum(tmp65, tmp11) tmp67 = triton_helpers.minimum(tmp66, tmp13) tmp68 = tl.load(in_ptr0 + (tmp67 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last') tmp69 = 2.0 tmp70 = tmp69 - tmp29 tmp71 = tmp70 * tmp31 tmp72 = tmp71 - tmp33 tmp73 = tmp72 * tmp70 tmp74 = tmp73 + tmp36 tmp75 = tmp74 * tmp70 tmp76 = tmp75 - tmp39 tmp77 = tmp68 * tmp76 tmp78 = tmp41 + tmp52 tmp79 = tmp78 + tmp63 tmp80 = tmp79 + tmp77 tmp81 = tmp6 - tmp7 tmp82 = triton_helpers.maximum(tmp81, tmp27) tmp83 = 
triton_helpers.minimum(tmp82, tmp4) tmp84 = tmp83 + tmp4 tmp85 = tmp84 * tmp31 tmp86 = tmp85 - tmp33 tmp87 = tmp86 * tmp84 tmp88 = tmp87 + tmp36 tmp89 = tmp88 * tmp84 tmp90 = tmp89 - tmp39 tmp91 = tmp80 * tmp90 tmp92 = triton_helpers.maximum(tmp8, tmp11) tmp93 = triton_helpers.minimum(tmp92, tmp13) tmp94 = tl.load(in_ptr0 + (tmp24 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last') tmp95 = tmp94 * tmp40 tmp96 = tl.load(in_ptr0 + (tmp43 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last') tmp97 = tmp96 * tmp51 tmp98 = tl.load(in_ptr0 + (tmp55 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last') tmp99 = tmp98 * tmp62 tmp100 = tl.load(in_ptr0 + (tmp67 + (4*tmp93) + (16*x2)), xmask, eviction_policy='evict_last') tmp101 = tmp100 * tmp76 tmp102 = tmp8 + tmp9 tmp103 = triton_helpers.maximum(tmp102, tmp11) tmp104 = triton_helpers.minimum(tmp103, tmp13) tmp105 = tl.load(in_ptr0 + (tmp24 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last') tmp106 = tmp105 * tmp40 tmp107 = tl.load(in_ptr0 + (tmp43 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last') tmp108 = tmp107 * tmp51 tmp109 = tl.load(in_ptr0 + (tmp55 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last') tmp110 = tmp109 * tmp62 tmp111 = tl.load(in_ptr0 + (tmp67 + (4*tmp104) + (16*x2)), xmask, eviction_policy='evict_last') tmp112 = tmp111 * tmp76 tmp113 = tmp95 + tmp97 tmp114 = tmp113 + tmp99 tmp115 = tmp114 + tmp101 tmp116 = tmp83 * tmp45 tmp117 = tmp116 - tmp47 tmp118 = tmp117 * tmp83 tmp119 = tmp118 * tmp83 tmp120 = tmp119 + tmp4 tmp121 = tmp115 * tmp120 tmp122 = tmp91 + tmp121 tmp123 = tmp106 + tmp108 tmp124 = tmp123 + tmp110 tmp125 = tmp124 + tmp112 tmp126 = tmp4 - tmp83 tmp127 = tmp126 * tmp45 tmp128 = tmp127 - tmp47 tmp129 = tmp128 * tmp126 tmp130 = tmp129 * tmp126 tmp131 = tmp130 + tmp4 tmp132 = tmp125 * tmp131 tmp133 = tmp122 + tmp132 tmp134 = tmp8 + tmp64 tmp135 = triton_helpers.maximum(tmp134, tmp11) tmp136 = triton_helpers.minimum(tmp135, tmp13) tmp137 = tl.load(in_ptr0 + (tmp24 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last') tmp138 = tmp137 * tmp40 tmp139 = tl.load(in_ptr0 + (tmp43 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last') tmp140 = tmp139 * tmp51 tmp141 = tl.load(in_ptr0 + (tmp55 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last') tmp142 = tmp141 * tmp62 tmp143 = tl.load(in_ptr0 + (tmp67 + (4*tmp136) + (16*x2)), xmask, eviction_policy='evict_last') tmp144 = tmp143 * tmp76 tmp145 = tmp138 + tmp140 tmp146 = tmp145 + tmp142 tmp147 = tmp146 + tmp144 tmp148 = tmp69 - tmp83 tmp149 = tmp148 * tmp31 tmp150 = tmp149 - tmp33 tmp151 = tmp150 * tmp148 tmp152 = tmp151 + tmp36 tmp153 = tmp152 * tmp148 tmp154 = tmp153 - tmp39 tmp155 = tmp147 * tmp154 tmp156 = tmp133 + tmp155 tl.store(in_out_ptr1 + (x3), tmp156, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf10; del buf10 # reuse buf19 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.floor, aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten._unsafe_index, aten.clamp, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0.run(buf19, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf19, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch import torch.nn.functional as F class Bicubic(Module): def __init__(self, scale_factor): super().__init__() self.scale_factor = scale_factor def forward(self, x): return F.interpolate(x, scale_factor=self.scale_factor, mode='bicubic') def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'scale_factor': 1.0}]
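A quick sanity check, as a sketch with assumed shapes: with scale_factor=1.0 (the configuration used by get_init_inputs), the cubic kernel's weights reduce to (0, 1, 0, 0), so bicubic interpolation should reproduce the input up to floating-point error:

import torch

m = Bicubic(scale_factor=1.0)
x = torch.rand(4, 4, 4, 4)
y = m(x)
assert y.shape == x.shape
assert torch.allclose(y, x, atol=1e-5)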
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0( in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = libdevice.floor(tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 - tmp9 tmp11 = tl.full([1], 0, tl.int64) tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tl.full([1], 3, tl.int64) tmp14 = triton_helpers.minimum(tmp12, tmp13) tmp15 = x0 tmp16 = tmp15.to(tl.float32) tmp17 = tmp16 + tmp2 tmp18 = tmp17 * tmp4 tmp19 = tmp18 - tmp2 tmp20 = libdevice.floor(tmp19) tmp21 = tmp20.to(tl.int32) tmp22 = tmp21 - tmp9 tmp23 = triton_helpers.maximum(tmp22, tmp11) tmp24 = triton_helpers.minimum(tmp23, tmp13) tmp25 = tl.load(in_ptr0 + (tmp24 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp26 = tmp19 - tmp20 tmp27 = 0.0 tmp28 = triton_helpers.maximum(tmp26, tmp27) tmp29 = triton_helpers.minimum(tmp28, tmp4) tmp30 = tmp29 + tmp4 tmp31 = -0.75 tmp32 = tmp30 * tmp31 tmp33 = -3.75 tmp34 = tmp32 - tmp33 tmp35 = tmp34 * tmp30 tmp36 = -6.0 tmp37 = tmp35 + tmp36 tmp38 = tmp37 * tmp30 tmp39 = -3.0 tmp40 = tmp38 - tmp39 tmp41 = tmp25 * tmp40 tmp42 = triton_helpers.maximum(tmp21, tmp11) tmp43 = triton_helpers.minimum(tmp42, tmp13) tmp44 = tl.load(in_ptr0 + (tmp43 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp45 = 1.25 tmp46 = tmp29 * tmp45 tmp47 = 2.25 tmp48 = tmp46 - tmp47 tmp49 = tmp48 * tmp29 tmp50 = tmp49 * tmp29 tmp51 = tmp50 + tmp4 tmp52 = tmp44 * tmp51 tmp53 = tmp21 + tmp9 tmp54 = triton_helpers.maximum(tmp53, tmp11) tmp55 = triton_helpers.minimum(tmp54, tmp13) tmp56 = tl.load(in_ptr0 + (tmp55 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp57 = tmp4 - tmp29 tmp58 = tmp57 * tmp45 tmp59 = tmp58 - tmp47 tmp60 = tmp59 * tmp57 tmp61 = tmp60 * tmp57 tmp62 = tmp61 + tmp4 tmp63 = tmp56 * tmp62 tmp64 = tl.full([1], 2, tl.int64) tmp65 = tmp21 + tmp64 tmp66 = triton_helpers.maximum(tmp65, tmp11) tmp67 = triton_helpers.minimum(tmp66, tmp13) tmp68 = tl.load(in_ptr0 + (tmp67 + 4 * tmp14 + 16 * x2), xmask, eviction_policy='evict_last') tmp69 = 2.0 tmp70 = tmp69 - tmp29 tmp71 = tmp70 * tmp31 tmp72 = tmp71 - tmp33 tmp73 = tmp72 * tmp70 tmp74 = tmp73 + tmp36 tmp75 = tmp74 * tmp70 tmp76 = tmp75 - tmp39 tmp77 = tmp68 * tmp76 tmp78 = tmp41 + tmp52 tmp79 = tmp78 + tmp63 tmp80 = tmp79 + tmp77 tmp81 = tmp6 - tmp7 tmp82 = triton_helpers.maximum(tmp81, tmp27) tmp83 = triton_helpers.minimum(tmp82, tmp4) tmp84 = tmp83 + tmp4 tmp85 = tmp84 * tmp31 tmp86 = tmp85 - tmp33 tmp87 = tmp86 * tmp84 tmp88 = tmp87 + tmp36 tmp89 = tmp88 * tmp84 tmp90 = tmp89 - tmp39 tmp91 = tmp80 * tmp90 tmp92 = triton_helpers.maximum(tmp8, tmp11) tmp93 = triton_helpers.minimum(tmp92, tmp13) tmp94 = tl.load(in_ptr0 + (tmp24 + 4 * tmp93 + 16 * x2), xmask, eviction_policy='evict_last') tmp95 = tmp94 * 
tmp40 tmp96 = tl.load(in_ptr0 + (tmp43 + 4 * tmp93 + 16 * x2), xmask, eviction_policy='evict_last') tmp97 = tmp96 * tmp51 tmp98 = tl.load(in_ptr0 + (tmp55 + 4 * tmp93 + 16 * x2), xmask, eviction_policy='evict_last') tmp99 = tmp98 * tmp62 tmp100 = tl.load(in_ptr0 + (tmp67 + 4 * tmp93 + 16 * x2), xmask, eviction_policy='evict_last') tmp101 = tmp100 * tmp76 tmp102 = tmp8 + tmp9 tmp103 = triton_helpers.maximum(tmp102, tmp11) tmp104 = triton_helpers.minimum(tmp103, tmp13) tmp105 = tl.load(in_ptr0 + (tmp24 + 4 * tmp104 + 16 * x2), xmask, eviction_policy='evict_last') tmp106 = tmp105 * tmp40 tmp107 = tl.load(in_ptr0 + (tmp43 + 4 * tmp104 + 16 * x2), xmask, eviction_policy='evict_last') tmp108 = tmp107 * tmp51 tmp109 = tl.load(in_ptr0 + (tmp55 + 4 * tmp104 + 16 * x2), xmask, eviction_policy='evict_last') tmp110 = tmp109 * tmp62 tmp111 = tl.load(in_ptr0 + (tmp67 + 4 * tmp104 + 16 * x2), xmask, eviction_policy='evict_last') tmp112 = tmp111 * tmp76 tmp113 = tmp95 + tmp97 tmp114 = tmp113 + tmp99 tmp115 = tmp114 + tmp101 tmp116 = tmp83 * tmp45 tmp117 = tmp116 - tmp47 tmp118 = tmp117 * tmp83 tmp119 = tmp118 * tmp83 tmp120 = tmp119 + tmp4 tmp121 = tmp115 * tmp120 tmp122 = tmp91 + tmp121 tmp123 = tmp106 + tmp108 tmp124 = tmp123 + tmp110 tmp125 = tmp124 + tmp112 tmp126 = tmp4 - tmp83 tmp127 = tmp126 * tmp45 tmp128 = tmp127 - tmp47 tmp129 = tmp128 * tmp126 tmp130 = tmp129 * tmp126 tmp131 = tmp130 + tmp4 tmp132 = tmp125 * tmp131 tmp133 = tmp122 + tmp132 tmp134 = tmp8 + tmp64 tmp135 = triton_helpers.maximum(tmp134, tmp11) tmp136 = triton_helpers.minimum(tmp135, tmp13) tmp137 = tl.load(in_ptr0 + (tmp24 + 4 * tmp136 + 16 * x2), xmask, eviction_policy='evict_last') tmp138 = tmp137 * tmp40 tmp139 = tl.load(in_ptr0 + (tmp43 + 4 * tmp136 + 16 * x2), xmask, eviction_policy='evict_last') tmp140 = tmp139 * tmp51 tmp141 = tl.load(in_ptr0 + (tmp55 + 4 * tmp136 + 16 * x2), xmask, eviction_policy='evict_last') tmp142 = tmp141 * tmp62 tmp143 = tl.load(in_ptr0 + (tmp67 + 4 * tmp136 + 16 * x2), xmask, eviction_policy='evict_last') tmp144 = tmp143 * tmp76 tmp145 = tmp138 + tmp140 tmp146 = tmp145 + tmp142 tmp147 = tmp146 + tmp144 tmp148 = tmp69 - tmp83 tmp149 = tmp148 * tmp31 tmp150 = tmp149 - tmp33 tmp151 = tmp150 * tmp148 tmp152 = tmp151 + tmp36 tmp153 = tmp152 * tmp148 tmp154 = tmp153 - tmp39 tmp155 = tmp147 * tmp154 tmp156 = tmp133 + tmp155 tl.store(in_out_ptr1 + x3, tmp156, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = buf10 del buf10 buf19 = buf13 del buf13 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0[ grid(256)](buf19, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf19, class BicubicNew(Module): def __init__(self, scale_factor): super().__init__() self.scale_factor = scale_factor def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ShivanshuPurohit/Diffusion
Bicubic
false
1,057
[ "MIT" ]
0
9a190d9aa4ed9767cf223e4ef57d0c31690f92cc
https://github.com/ShivanshuPurohit/Diffusion/tree/9a190d9aa4ed9767cf223e4ef57d0c31690f92cc
from torch.nn import Module import torch import torch.nn.functional as F class Model(Module): def __init__(self, scale_factor): super().__init__() self.scale_factor = scale_factor def forward(self, x): return F.interpolate(x, scale_factor=self.scale_factor, mode='bicubic') def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [1.0]
adder2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/w7/cw7nwm4phyff5u2mxvg4wffuk3qkl3kcrshhf3qydzhb7zd4zl75.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/fh/cfhxlck7uzhxtofknhjghf2xokzgxovbt22nsyc7lfq6lggpmsc6.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.neg] # Source node to ATen node mapping: # out_2 => neg # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%_cdist_forward,), kwargs = {}) triton_poi_fused_neg_1 = async_compile.triton('triton_poi_fused_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_neg_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = -tmp0 tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xb/cxb4lxq2tbmyzyffeyfgkeb34canjnomidn7ybre2qjgrajgfrs5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.neg] # Source node to ATen node mapping: # out => neg_1 # Graph fragment: # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%_cdist_forward_1,), kwargs = {}) triton_poi_fused_neg_2 = async_compile.triton('triton_poi_fused_neg_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_neg_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [cdist], Original ATen: [aten._cdist_forward] buf1 = torch.ops.aten._cdist_forward.default(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf0, 1.0, None) buf2 = buf1 del buf1 buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.neg] triton_poi_fused_neg_1.run(buf3, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [cdist_1], Original ATen: [aten._cdist_forward] buf4 = torch.ops.aten._cdist_forward.default(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf0, 2.0, None) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.neg] triton_poi_fused_neg_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0) return (buf3, buf6, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf0, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


def adder2d_function(X, W, stride=1, padding=0, groups=1):
    n_filters, _d_filter, h_filter, w_filter = W.size()
    n_x, _d_x, h_x, w_x = X.size()
    h_out = (h_x - h_filter + 2 * padding) / stride + 1
    w_out = (w_x - w_filter + 2 * padding) / stride + 1
    h_out, w_out = int(h_out), int(w_out)
    if groups == 1:
        X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x),
            h_filter, dilation=1, padding=padding, stride=stride).view(n_x,
            -1, h_out * w_out)
        X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
        W_col = W.view(n_filters, -1)
        out_2 = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 1)
        out_2 = out_2.detach()
        out = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 2)
        out.data = out_2.data
        out = out.view(n_filters, h_out, w_out, n_x)
        out = out.permute(3, 0, 1, 2).contiguous()
        return out
    else:
        torch.Tensor(n_x, n_filters, h_out, w_out)
        outs = []
        for g in range(groups):
            X_part = X[:, g, ...].unsqueeze(1)
            W_part = W[g, ...].unsqueeze(0)
            X_col = torch.nn.functional.unfold(X_part.view(1, -1, h_x,
                w_x), h_filter, dilation=1, padding=padding, stride=stride
                ).view(n_x, -1, h_out * w_out)
            X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
            W_col = W_part.view(1, -1)
            out_2 = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 1)
            out_2 = out_2.detach()
            out = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 2)
            out.data = out_2.data
            out = out.view(1, h_out, w_out, n_x)
            out = out.permute(3, 0, 1, 2).contiguous()
            outs.append(out)
        return torch.cat(outs, dim=1)


class adder2d(nn.Module):

    def __init__(self, input_channel, output_channel, kernel_size, stride=1,
        padding=0, groups=1, bias=False):
        super(adder2d, self).__init__()
        self.stride = stride
        self.padding = padding
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.kernel_size = kernel_size
        self.groups = groups
        self.adder = torch.nn.Parameter(nn.init.kaiming_normal_(torch.
            randn(output_channel, input_channel // groups, kernel_size,
            kernel_size)))
        self.bias = bias
        if bias:
            self.b = torch.nn.Parameter(nn.init.kaiming_uniform_(torch.
                zeros(output_channel)))

    def forward(self, x):
        output = adder2d_function(x, self.adder, self.stride, self.padding,
            self.groups)
        if self.bias:
            output += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3)
        return output

    def extra_repr(self):
        return (
            f"{'v2'.upper()}, {self.input_channel}, {self.output_channel}, " +
            f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, groups={self.groups}, bias={self.bias}'
            )


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channel': 4, 'output_channel': 4, 'kernel_size': 4}]
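The `out.data = out_2.data` pattern above is the AdderNet-style straight-through trick: the forward pass returns the (detached) negative L1 distances, while autograd differentiates through the negative L2 distance graph. A minimal standalone sketch of the same pattern (illustrative only, not taken from the repo):

import torch

w = torch.randn(4, 8, requires_grad=True)
x = torch.randn(16, 8)
l1 = -torch.cdist(w, x, 1).detach()  # values used in the forward pass
l2 = -torch.cdist(w, x, 2)           # graph used in the backward pass
out = l2
out.data = l1.data                   # forward now yields l1; backward still follows l2
out.sum().backward()
print(w.grad.shape)                  # torch.Size([4, 8])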
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_neg_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = -tmp0
    tl.store(in_out_ptr0 + x0, tmp1, xmask)


@triton.jit
def triton_poi_fused_neg_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = -tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_2
        buf1 = torch.ops.aten._cdist_forward.default(reinterpret_tensor(
            primals_1, (4, 64), (64, 1), 0), buf0, 1.0, None)
        buf2 = buf1
        del buf1
        buf3 = buf2
        del buf2
        triton_poi_fused_neg_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
        buf4 = torch.ops.aten._cdist_forward.default(reinterpret_tensor(
            primals_1, (4, 64), (64, 1), 0), buf0, 2.0, None)
        buf5 = buf4
        del buf4
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_neg_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
    return buf3, buf6, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0
        ), buf0, buf5


def adder2d_function(X, W, stride=1, padding=0, groups=1):
    n_filters, _d_filter, h_filter, w_filter = W.size()
    n_x, _d_x, h_x, w_x = X.size()
    h_out = (h_x - h_filter + 2 * padding) / stride + 1
    w_out = (w_x - w_filter + 2 * padding) / stride + 1
    h_out, w_out = int(h_out), int(w_out)
    if groups == 1:
        X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x),
            h_filter, dilation=1, padding=padding, stride=stride).view(n_x,
            -1, h_out * w_out)
        X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
        W_col = W.view(n_filters, -1)
        out_2 = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 1)
        out_2 = out_2.detach()
        out = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 2)
        out.data = out_2.data
        out = out.view(n_filters, h_out, w_out, n_x)
        out = out.permute(3, 0, 1, 2).contiguous()
        return out
    else:
        torch.Tensor(n_x, n_filters, h_out, w_out)
        outs = []
        for g in range(groups):
            X_part = X[:, g, ...].unsqueeze(1)
            W_part = W[g, ...].unsqueeze(0)
            X_col = torch.nn.functional.unfold(X_part.view(1, -1, h_x,
                w_x), h_filter, dilation=1, padding=padding, stride=stride
                ).view(n_x, -1, h_out * w_out)
            X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
            W_col = W_part.view(1, -1)
            out_2 = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 1)
            out_2 = out_2.detach()
            out = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 2)
            out.data = out_2.data
            out = out.view(1, h_out, w_out, n_x)
            out = out.permute(3, 0, 1, 2).contiguous()
            outs.append(out)
        return torch.cat(outs, dim=1)


class adder2dNew(nn.Module):

    def __init__(self, input_channel, output_channel, kernel_size, stride=1,
        padding=0, groups=1, bias=False):
        super(adder2dNew, self).__init__()
        self.stride = stride
        self.padding = padding
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.kernel_size = kernel_size
        self.groups = groups
        self.adder = torch.nn.Parameter(nn.init.kaiming_normal_(torch.
            randn(output_channel, input_channel // groups, kernel_size,
            kernel_size)))
        self.bias = bias
        if bias:
            self.b = torch.nn.Parameter(nn.init.kaiming_uniform_(torch.
                zeros(output_channel)))

    def extra_repr(self):
        return (
            f"{'v2'.upper()}, {self.input_channel}, {self.output_channel}, " +
            f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, groups={self.groups}, bias={self.bias}'
            )

    def forward(self, input_0):
        primals_1 = self.adder
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
ShangyinGao/pytorch-cifar
adder2d
false
1058
[ "MIT" ]
0
480e19825bb155e3d0fafae3545faa3a4165bd77
https://github.com/ShangyinGao/pytorch-cifar/tree/480e19825bb155e3d0fafae3545faa3a4165bd77
import torch
import torch.nn as nn


def adder2d_function(X, W, stride=1, padding=0, groups=1):
    n_filters, _d_filter, h_filter, w_filter = W.size()
    n_x, _d_x, h_x, w_x = X.size()
    h_out = (h_x - h_filter + 2 * padding) / stride + 1
    w_out = (w_x - w_filter + 2 * padding) / stride + 1
    h_out, w_out = int(h_out), int(w_out)
    if groups == 1:
        X_col = torch.nn.functional.unfold(X.view(1, -1, h_x, w_x),
            h_filter, dilation=1, padding=padding, stride=stride).view(n_x,
            -1, h_out * w_out)
        X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
        W_col = W.view(n_filters, -1)
        out_2 = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 1)
        out_2 = out_2.detach()
        out = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 2)
        out.data = out_2.data
        out = out.view(n_filters, h_out, w_out, n_x)
        out = out.permute(3, 0, 1, 2).contiguous()
        return out
    else:
        torch.Tensor(n_x, n_filters, h_out, w_out)
        outs = []
        for g in range(groups):
            X_part = X[:, g, ...].unsqueeze(1)
            W_part = W[g, ...].unsqueeze(0)
            X_col = torch.nn.functional.unfold(X_part.view(1, -1, h_x,
                w_x), h_filter, dilation=1, padding=padding, stride=stride
                ).view(n_x, -1, h_out * w_out)
            X_col = X_col.permute(1, 2, 0).contiguous().view(X_col.size(1), -1)
            W_col = W_part.view(1, -1)
            out_2 = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 1)
            out_2 = out_2.detach()
            out = -torch.cdist(W_col, X_col.transpose(0, 1).contiguous(), 2)
            out.data = out_2.data
            out = out.view(1, h_out, w_out, n_x)
            out = out.permute(3, 0, 1, 2).contiguous()
            outs.append(out)
        return torch.cat(outs, dim=1)


class Model(nn.Module):

    def __init__(self, input_channel, output_channel, kernel_size, stride=1,
        padding=0, groups=1, bias=False):
        super().__init__()
        self.stride = stride
        self.padding = padding
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.kernel_size = kernel_size
        self.groups = groups
        self.adder = torch.nn.Parameter(nn.init.kaiming_normal_(torch.
            randn(output_channel, input_channel // groups, kernel_size,
            kernel_size)))
        self.bias = bias
        if bias:
            self.b = torch.nn.Parameter(nn.init.kaiming_uniform_(torch.
                zeros(output_channel)))

    def forward(self, x):
        output = adder2d_function(x, self.adder, self.stride, self.padding,
            self.groups)
        if self.bias:
            output += self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3)
        return output

    def extra_repr(self):
        return (
            f"{'v2'.upper()}, {self.input_channel}, {self.output_channel}, " +
            f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, groups={self.groups}, bias={self.bias}'
            )


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 3]
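A minimal sketch of how this record's harness is meant to be driven, assuming the Model/get_inputs/get_init_inputs definitions above (runs on CPU):

model = Model(*get_init_inputs())
out = model(*get_inputs())
print(out.shape)  # torch.Size([4, 4, 1, 1]): a 4x4 kernel over a 4x4 input leaves one valid position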
FFN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ji/cji7mw45fbdoanjc5e6qu3e2bf5d6jnnjabskl6onjlk7uv7oqud.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add # tgt_1 => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 
tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xy/cxyvzp6lij7d3yqq2ut3vi6guk7xnzb7qwqb66dthlly44r65vfk.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add # tgt_1 => add_1, add_2, mul, mul_1, rsqrt, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_1.run(primals_3, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_2.run(primals_3, buf2, buf3, buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0) del buf3 del buf4 del primals_7 return (buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F


class FFN(nn.Module):

    def __init__(self, d_model, d_ffn, dropout=0):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = F.relu
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, tgt):
        tgt2 = self.linear2(self.dropout1(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm(tgt)
        return tgt


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_ffn': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](primals_3, buf2,
            buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(256)](primals_3, buf2,
            buf3, buf4, primals_6, primals_7, buf5, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf3
        del buf4
        del primals_7
    return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4), (
        4, 1), 0), buf2, primals_4, buf6


class FFNNew(nn.Module):

    def __init__(self, d_model, d_ffn, dropout=0):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = F.relu
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, input_0):
        primals_1 = self.linear1.weight
        primals_2 = self.linear1.bias
        primals_4 = self.linear2.weight
        primals_5 = self.linear2.bias
        primals_6 = self.norm.weight
        primals_7 = self.norm.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
SelvamArul/MOTR
FFN
false
1059
[ "MIT" ]
0
2a0b70288feaca665d460096159100d5077e9312
https://github.com/SelvamArul/MOTR/tree/2a0b70288feaca665d460096159100d5077e9312
import torch
import torch.utils.data
import torchvision.transforms.functional as F
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, d_model, d_ffn, dropout=0):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = F.relu
        self.dropout1 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout2 = nn.Dropout(dropout)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, tgt):
        tgt2 = self.linear2(self.dropout1(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm(tgt)
        return tgt


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
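A minimal usage sketch for this record's reference module, assuming the harness functions above (CPU is fine):

ffn = Model(*get_init_inputs())  # d_model=4, d_ffn=4
y = ffn(*get_inputs())
print(y.shape)                   # torch.Size([4, 4, 4, 4])
# LayerNorm(d_model) normalizes over the last dimension, so each length-4
# vector of y has (approximately) zero mean and unit variance.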
BinaryReg
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/dx/cdxzp5g4colwzu5hffvbsmu53nbfzo5dpcn2duoxzlf2thhrusqc.py # Topologically Sorted Source Nodes: [diff, abs_1, diff_1, truediv, loss, mul], Original ATen: [aten.sub, aten.abs, aten.clamp, aten.reciprocal, aten.mul, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # diff => sub # diff_1 => clamp_min # loss => mean # mul => mul_1 # truediv => mul, reciprocal # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.5), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%abs_1, 0.01), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%clamp_min,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.1), kwargs = {}) triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0 = async_compile.triton('triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 0.5 tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 0.01 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tl.full([1], 1, tl.int32) tmp7 = tmp6 / tmp5 tmp8 = 1.0 tmp9 = tmp7 * tmp8 tmp10 = tl.broadcast_to(tmp9, [RBLOCK]) tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0)) tmp13 = 256.0 tmp14 = tmp12 / tmp13 tmp15 = 0.1 tmp16 = tmp14 * tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [diff, abs_1, diff_1, truediv, loss, mul], Original ATen: [aten.sub, aten.abs, aten.clamp, aten.reciprocal, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class BinaryReg(nn.Module):
    """Regularization for encouraging the outputs to be binary.
    """

    def __init__(self, alpha=0.1):
        super().__init__()
        self.alpha = alpha

    def forward(self, pred):
        diff = pred - 0.5
        diff = torch.clamp(torch.abs(diff), min=0.01)
        loss = (1.0 / diff).mean()
        return self.alpha * loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0(in_out_ptr0,
    in_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = 0.5
    tmp2 = tmp0 - tmp1
    tmp3 = tl_math.abs(tmp2)
    tmp4 = 0.01
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp6 = tl.full([1], 1, tl.int32)
    tmp7 = tmp6 / tmp5
    tmp8 = 1.0
    tmp9 = tmp7 * tmp8
    tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
    tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
    tmp13 = 256.0
    tmp14 = tmp12 / tmp13
    tmp15 = 0.1
    tmp16 = tmp14 * tmp15
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_abs_clamp_mean_mul_reciprocal_sub_0[grid(1)](buf1,
            arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
    return buf1,


class BinaryRegNew(nn.Module):
    """Regularization for encouraging the outputs to be binary.
    """

    def __init__(self, alpha=0.1):
        super().__init__()
        self.alpha = alpha

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Shray64/pytorch_connectomics
BinaryReg
false
1060
[ "MIT" ]
0
d6c814f11ac2f8418ede5ae220a93016f50214fc
https://github.com/Shray64/pytorch_connectomics/tree/d6c814f11ac2f8418ede5ae220a93016f50214fc
import torch
import torch.nn as nn
import torch.utils.data


class Model(nn.Module):
    """Regularization for encouraging the outputs to be binary.
    """

    def __init__(self, alpha=0.1):
        super().__init__()
        self.alpha = alpha

    def forward(self, pred):
        diff = pred - 0.5
        diff = torch.clamp(torch.abs(diff), min=0.01)
        loss = (1.0 / diff).mean()
        return self.alpha * loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
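A small sketch of what the loss rewards, assuming the Model above: predictions near 0.5 are penalized most (the clamp caps the penalty at 1/0.01 per element), while predictions near 0 or 1 approach the minimum of alpha * (1/0.5).

import torch
reg = Model()                        # alpha defaults to 0.1
print(reg(torch.full((2, 2), 0.5)))  # tensor(10.): 0.1 * (1 / 0.01)
print(reg(torch.zeros(2, 2)))        # tensor(0.2000): 0.1 * (1 / 0.5)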
MessageNormalizer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/5p/c5pq5ihkqog6yst24r6r2rrf5qe3nsxkwwpixhsiqjqc6rcatvet.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MessageNormalizer(nn.Module):

    def __init__(self, in_features, init_mean=1.0, init_stddev=0.01):
        super(MessageNormalizer, self).__init__()
        self.in_features = in_features
        self.out_features = in_features
        self.weight = torch.nn.Parameter(torch.Tensor(in_features))
        self.init_mean = init_mean
        self.init_stddev = init_stddev
        self.reset_parameters()

    def reset_parameters(self):
        self.weight.data.normal_(mean=self.init_mean, std=self.init_stddev)

    def forward(self, message):
        return self.weight * message

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'in_features=' + str(self.
            in_features) + 'out_features=' + str(self.out_features) + ')'


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
    return buf0, primals_2


class MessageNormalizerNew(nn.Module):

    def __init__(self, in_features, init_mean=1.0, init_stddev=0.01):
        super(MessageNormalizerNew, self).__init__()
        self.in_features = in_features
        self.out_features = in_features
        self.weight = torch.nn.Parameter(torch.Tensor(in_features))
        self.init_mean = init_mean
        self.init_stddev = init_stddev
        self.reset_parameters()

    def reset_parameters(self):
        self.weight.data.normal_(mean=self.init_mean, std=self.init_stddev)

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'in_features=' + str(self.
            in_features) + 'out_features=' + str(self.out_features) + ')'

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
ShinyaFUKUMOTO/LeMPA
MessageNormalizer
false
1061
[ "BSD-2-Clause" ]
0
23b8c9f60fc13cf28d4485757d2ae0b3465b3e92
https://github.com/ShinyaFUKUMOTO/LeMPA/tree/23b8c9f60fc13cf28d4485757d2ae0b3465b3e92
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, in_features, init_mean=1.0, init_stddev=0.01):
        super().__init__()
        self.in_features = in_features
        self.out_features = in_features
        self.weight = torch.nn.Parameter(torch.Tensor(in_features))
        self.init_mean = init_mean
        self.init_stddev = init_stddev
        self.reset_parameters()

    def reset_parameters(self):
        self.weight.data.normal_(mean=self.init_mean, std=self.init_stddev)

    def forward(self, message):
        return self.weight * message

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'in_features=' + str(self.
            in_features) + 'out_features=' + str(self.out_features) + ')'


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
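A short usage sketch for the reference module above; the learned weight broadcasts over the trailing feature dimension:

import torch
norm = Model(4)                       # weight ~ N(1.0, 0.01), shape (4,)
msg = torch.ones(2, 3, 4)
out = norm(msg)
print(out.shape)                      # torch.Size([2, 3, 4])
print(torch.allclose(out, norm.weight.expand_as(msg)))  # True for an all-ones input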
MaxPoolStride1
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/rz/crzg7kxbg2534vzhc3kxhbostzh2mlj5xgy25tqm37zxwoqenf5z.py # Topologically Sorted Source Nodes: [pooled_x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # pooled_x => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) % 2 x2 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (3*x1)) + (3*x1) * 
((3*x1) < (3)))) + (16*x2) + ((3) * ((3) <= (3*x0)) + (3*x0) * ((3*x0) < (3)))), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (3*x1)) + (3*x1) * ((3*x1) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + (3*x0))) + (1 + (3*x0)) * ((1 + (3*x0)) < (3)))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (3*x1)) + (3*x1) * ((3*x1) < (3)))) + (16*x2) + ((3) * ((3) <= (2 + (3*x0))) + (2 + (3*x0)) * ((2 + (3*x0)) < (3)))), xmask) tmp5 = tl.load(in_ptr0 + (3 + (4*((3) * ((3) <= (3*x1)) + (3*x1) * ((3*x1) < (3)))) + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + (3*x1))) + (1 + (3*x1)) * ((1 + (3*x1)) < (3)))) + (16*x2) + ((3) * ((3) <= (3*x0)) + (3*x0) * ((3*x0) < (3)))), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + (3*x1))) + (1 + (3*x1)) * ((1 + (3*x1)) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + (3*x0))) + (1 + (3*x0)) * ((1 + (3*x0)) < (3)))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + (3*x1))) + (1 + (3*x1)) * ((1 + (3*x1)) < (3)))) + (16*x2) + ((3) * ((3) <= (2 + (3*x0))) + (2 + (3*x0)) * ((2 + (3*x0)) < (3)))), xmask) tmp13 = tl.load(in_ptr0 + (3 + (4*((3) * ((3) <= (1 + (3*x1))) + (1 + (3*x1)) * ((1 + (3*x1)) < (3)))) + (16*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (2 + (3*x1))) + (2 + (3*x1)) * ((2 + (3*x1)) < (3)))) + (16*x2) + ((3) * ((3) <= (3*x0)) + (3*x0) * ((3*x0) < (3)))), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (2 + (3*x1))) + (2 + (3*x1)) * ((2 + (3*x1)) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + (3*x0))) + (1 + (3*x0)) * ((1 + (3*x0)) < (3)))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (2 + (3*x1))) + (2 + (3*x1)) * ((2 + (3*x1)) < (3)))) + (16*x2) + ((3) * ((3) <= (2 + (3*x0))) + (2 + (3*x0)) * ((2 + (3*x0)) < (3)))), xmask) tmp21 = tl.load(in_ptr0 + (3 + (4*((3) * ((3) <= (2 + (3*x1))) + (2 + (3*x1)) * ((2 + (3*x1)) < (3)))) + (16*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x2) + ((3) * ((3) <= (3*x0)) + (3*x0) * ((3*x0) < (3)))), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (12 + (16*x2) + ((3) * ((3) <= (1 + (3*x0))) + (1 + (3*x0)) * ((1 + (3*x0)) < (3)))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + (16*x2) + ((3) * ((3) <= (2 + (3*x0))) + (2 + (3*x0)) * ((2 + (3*x0)) < (3)))), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x4), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [pooled_x], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.functional as F import torch._utils class MaxPoolStride1(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, x): padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate') pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x) return pooled_x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch._utils assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 % 2 x2 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3 )) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp5 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * ( 1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) * (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp13 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1 ) * (1 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) * (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask) tmp21 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1 ) * (2 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 * x0 < 3))), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, 
tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x4, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPoolStride1New(nn.Module): def __init__(self, kernel_size): super(MaxPoolStride1New, self).__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
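# Side note on the kernel above (a hedged reading, not part of the source
# record): the recurring index expression such as
# `3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3)` is a branch-free min(3 * x0, 3).
# Clamping offsets to the last valid row/column index is how the replicate
# padding from F.pad survives fusion into a single max-pool kernel. A minimal
# pure-Python sketch of the identity, relying only on bool-to-int coercion:
def clamped(i, last=3):
    # booleans coerce to 0/1, so this selects `last` when i >= last, else i
    return last * (last <= i) + i * (i < last)

assert [clamped(i) for i in range(6)] == [0, 1, 2, 3, 3, 3]
assert all(clamped(i) == min(i, 3) for i in range(10))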
Sarathismg/Pose-Estimator-Old-Version
MaxPoolStride1
false
1062
[ "Apache-2.0" ]
0
ecaa03769323b94a4d7222e2d3606d1ce92a2fae
https://github.com/Sarathismg/Pose-Estimator-Old-Version/tree/ecaa03769323b94a4d7222e2d3606d1ce92a2fae
import torch import torch.nn as nn import torch.utils.data import torch.utils.data.distributed import torch.nn.functional as F import torch._utils class Model(nn.Module): def __init__(self, kernel_size): super().__init__() self.kernel_size = kernel_size self.pad = kernel_size - 1 def forward(self, x): padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate') pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x) return pooled_x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
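# A minimal smoke test, assuming a CUDA device and that the MaxPoolStride1 /
# MaxPoolStride1New classes above are in scope. Note that in
# `nn.MaxPool2d(self.kernel_size, self.pad)` the second positional argument
# is the stride, so with kernel_size=4 the module replicate-pads 4x4 input to
# 7x7 and pools with kernel 4 and stride 3, yielding a 2x2 map.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
eager = MaxPoolStride1(kernel_size=4)(x)   # pad + MaxPool2d, two eager ops
fused = MaxPoolStride1New(kernel_size=4)(x)  # one fused Triton kernel
assert eager.shape == fused.shape == (4, 4, 2, 2)
assert torch.allclose(eager, fused)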
GroupNorm32
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/zy/czyl6lkgkemly2rorp7dytnq5bsikpucv7yhz2var2hddv5xnkj4.py # Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid] # Source node to ATen node mapping: # mul => mul_2 # sigmoid => sigmoid # y => add, add_1, mul_1, rsqrt, var_mean # y_1 => mul_3 # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4.0), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %sigmoid), kwargs = {}) triton_per_fused_mul_native_group_norm_sigmoid_0 = async_compile.triton('triton_per_fused_mul_native_group_norm_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 
1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_native_group_norm_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = tmp27 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(in_out_ptr1 + (r1 + (64*x0)), tmp31, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [y, mul, sigmoid, y_1], Original ATen: [aten.native_group_norm, aten.mul, aten.sigmoid] stream0 = get_raw_stream(0) triton_per_fused_mul_native_group_norm_sigmoid_0.run(buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, grid=grid(4), stream=stream0) return (buf5, primals_1, primals_2, primals_3, buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class GroupNorm32(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * F.sigmoid(y * float(self.swish)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1, 'num_channels': 4, 'swish': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 64.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 * tmp21 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = 4.0 tmp29 = tmp27 * tmp28 tmp30 = tl.sigmoid(tmp29) tmp31 = tmp27 * tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(in_out_ptr1 + (r1 + 64 * x0), tmp31, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = buf4 del buf4 get_raw_stream(0) triton_per_fused_mul_native_group_norm_sigmoid_0[grid(4)](buf3, buf5, primals_1, primals_2, primals_3, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) return buf5, primals_1, primals_2, primals_3, buf0, buf3 class GroupNorm32New(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ShivanshuPurohit/Diffusion
GroupNorm32
false
1063
[ "MIT" ]
0
9a190d9aa4ed9767cf223e4ef57d0c31690f92cc
https://github.com/ShivanshuPurohit/Diffusion/tree/9a190d9aa4ed9767cf223e4ef57d0c31690f92cc
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.GroupNorm): def __init__(self, num_groups, num_channels, swish, eps=1e-05): super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps) self.swish = swish def forward(self, x): y = super().forward(x.float()) if self.swish == 1.0: y = F.silu(y) elif self.swish: y = y * F.sigmoid(y * float(self.swish)) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [1, 4, 4]
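# A minimal equivalence check, assuming a CUDA device and the GroupNorm32 /
# GroupNorm32New classes above in scope. With swish=4 the module computes
# group-norm followed by y * sigmoid(4 * y) (F.sigmoid is a deprecated alias
# of torch.sigmoid), and the compiled version fuses the mean/variance
# reduction, the affine transform, and the gated activation into one
# persistent-reduction kernel. nn.GroupNorm initializes weight to ones and
# bias to zeros, so no state copy is needed for this comparison.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
gn_eager = GroupNorm32(num_groups=1, num_channels=4, swish=4).cuda()
gn_fused = GroupNorm32New(num_groups=1, num_channels=4, swish=4).cuda()
assert torch.allclose(gn_eager(x), gn_fused(x), atol=1e-5)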
HardSigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/n6/cn6mtix3xxpl4wrz6ijcstysgsoj75bwqjpwztauquzjojdcfz37.py # Topologically Sorted Source Nodes: [mul, x, x_1, neg, result, neg_1, result_1], Original ATen: [aten.mul, aten.add, aten.clamp, aten.neg, aten.threshold] # Source node to ATen node mapping: # mul => mul # neg => neg # neg_1 => neg_1 # result => full_default, le, where # result_1 => full_default_1, le_1, where_1 # x => add # x_1 => clamp_max, clamp_min # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {}) # %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_max,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%neg, -1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %full_default, %neg), kwargs = {}) # %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%where,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%neg_1, 0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le_1, %full_default_1, %neg_1), kwargs = {}) triton_poi_fused_add_clamp_mul_neg_threshold_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_neg_threshold_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_neg_threshold_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_mul_neg_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.2 tmp2 = tmp0 * tmp1 tmp3 = 0.5 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tmp9 = -tmp8 tmp10 = -1.0 tmp11 = tmp9 <= tmp10 tmp12 = tl.where(tmp11, tmp10, tmp9) tmp13 = -tmp12 tmp14 = tmp13 <= tmp5 tmp15 = tl.where(tmp14, tmp5, tmp13) tl.store(out_ptr0 + (x0), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, x, x_1, neg, result, neg_1, result_1], Original ATen: [aten.mul, aten.add, aten.clamp, aten.neg, aten.threshold] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_mul_neg_threshold_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F class HardSigmoid(torch.nn.Module): """ Pytorch implementation of the hard sigmoid activation function """ def __init__(self): super(HardSigmoid, self).__init__() def forward(self, input): x = 0.2 * input + 0.5 x = torch.clamp(x, 0, 1) x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_clamp_mul_neg_threshold_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.2 tmp2 = tmp0 * tmp1 tmp3 = 0.5 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tmp9 = -tmp8 tmp10 = -1.0 tmp11 = tmp9 <= tmp10 tmp12 = tl.where(tmp11, tmp10, tmp9) tmp13 = -tmp12 tmp14 = tmp13 <= tmp5 tmp15 = tl.where(tmp14, tmp5, tmp13) tl.store(out_ptr0 + x0, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_mul_neg_threshold_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class HardSigmoidNew(torch.nn.Module): """ Pytorch implementation of the hard sigmoid activation function """ def __init__(self): super(HardSigmoidNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ShiraLightricks/3d-photo-inpainting
HardSigmoid
false
1064
[ "MIT" ]
0
c42ac41576690b765e50f5281ddbfb58439ff36d
https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d
import torch import torch.nn.functional as F class Model(torch.nn.Module): """ Pytorch implementation of the hard sigmoid activation function """ def __init__(self): super().__init__() def forward(self, input): x = 0.2 * input + 0.5 x = torch.clamp(x, 0, 1) x = F.threshold(-x, -1, -1) x = F.threshold(-x, 0, 0) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
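# A quick sanity check, assuming a CUDA device and the HardSigmoid /
# HardSigmoidNew classes above in scope. After the clamp to [0, 1], the two
# neg/threshold steps only re-enforce the same bounds, so the whole module
# reduces to the standard hard sigmoid clamp(0.2 * x + 0.5, 0, 1); Inductor
# folds the mul/add/clamp/neg/threshold chain into one elementwise kernel.
import torch

x = torch.randn(4, 4, 4, 4, device='cuda')  # randn exercises both bounds
y = HardSigmoidNew()(x)
assert torch.allclose(y, torch.clamp(0.2 * x + 0.5, 0.0, 1.0))
assert torch.allclose(y, HardSigmoid()(x))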
Classifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean] # Source node to ATen node mapping: # out => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 return (buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from abc import * class Classifier(nn.Module): def __init__(self, in_channels, num_classes): super(Classifier, self).__init__() self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(in_channels, num_classes) def forward(self, x): out = self.avgpool(x) out = torch.flatten(out, 1) out = self.fc(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from abc import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf2) del primals_2 del primals_3 return buf2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0) class ClassifierNew(nn.Module): def __init__(self, in_channels, num_classes): super(ClassifierNew, self).__init__() self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(in_channels, num_classes) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Slime0519/simple-faster-rcnn-pytorch
Classifier
false
1065
[ "MIT" ]
0
0503e9b4d07a24ae0bc1789a61ed937709f5304c
https://github.com/Slime0519/simple-faster-rcnn-pytorch/tree/0503e9b4d07a24ae0bc1789a61ed937709f5304c
import torch import torch.nn as nn from abc import * class Model(nn.Module): def __init__(self, in_channels, num_classes): super().__init__() self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(in_channels, num_classes) def forward(self, x): out = self.avgpool(x) out = torch.flatten(out, 1) out = self.fc(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
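# A minimal sketch, assuming a CUDA device and the Classifier / ClassifierNew
# classes above in scope. The compiled path replaces
# AdaptiveAvgPool2d((1, 1)) + flatten with a single 16-element mean reduction
# per (batch, channel) pair and dispatches the linear layer to addmm. The fc
# weights are randomly initialized, so state must be copied before comparing.
import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
clf_eager = Classifier(in_channels=4, num_classes=4).cuda()
clf_fused = ClassifierNew(in_channels=4, num_classes=4).cuda()
clf_fused.load_state_dict(clf_eager.state_dict())
assert torch.allclose(clf_eager(x), clf_fused(x), atol=1e-6)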
TemporalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/7j/c7jy63he3fhmqd62giamt227766vsitw2rh2xrr6mmcphr7netrz.py # Topologically Sorted Source Nodes: [add, add_1, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %view_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x4 = xindex % 16 x5 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (x5), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + (x5), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py # Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weights => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/kt/cktghousutx6xui2sl2rvevzmb7gkacvfhntjq5n2xzeu7v57oz6.py # Topologically Sorted Source Nodes: [weights], Original ATen: 
[aten._softmax] # Source node to ATen node mapping: # weights => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/de/cde5wp4wyi2tqgfwsvoxmxb77ndgieetn2kkhp3yursmtfolgix6.py # Topologically Sorted Source Nodes: [weighted_feats, attn_feats], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # attn_feats => sum_2 # weighted_feats => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x1 = (xindex // 4) % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask) tmp1 = tl.load(in_ptr1 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Wh], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [Uv], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [add, add_1, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf2, buf0, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf3 = reinterpret_tensor(buf0, (64, 1), (1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [energies], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [weighted_feats, attn_feats], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_4, buf5, buf6, 64, grid=grid(64), stream=stream0) return (buf6, buf5, primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf2, buf5, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class TemporalAttention(nn.Module): def __init__(self, hidden_size, feat_size, bottleneck_size): super(TemporalAttention, self).__init__() self.hidden_size = hidden_size self.feat_size = feat_size self.bottleneck_size = bottleneck_size self.W = nn.Linear(self.hidden_size, self.bottleneck_size, bias=False) self.U = nn.Linear(self.feat_size, self.bottleneck_size, bias=False) self.b = nn.Parameter(torch.ones(self.bottleneck_size), requires_grad=True) self.w = nn.Linear(self.bottleneck_size, 1, bias=False) def forward(self, hidden, feats, masks=None): Wh = self.W(hidden) Uv = self.U(feats) Wh = Wh.unsqueeze(1).expand_as(Uv) energies = self.w(torch.tanh(Wh + Uv + self.b)) if masks is not None: energies = energies.squeeze(2) energies[~masks] = -float('inf') energies = energies.unsqueeze(2) weights = F.softmax(energies, dim=1) weighted_feats = feats * weights attn_feats = weighted_feats.sum(dim=1) return attn_feats, weights def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'feat_size': 4, 'bottleneck_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_out_ptr0 + x5, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(in_out_ptr0 + x5, tmp5, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x1 = xindex // 4 % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask) tmp4 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask) tmp8 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp12 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = 
tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](buf2, buf0, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf3 = reinterpret_tensor(buf0, (64, 1), (1, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0) del buf4 triton_poi_fused_mul_sum_3[grid(64)](primals_4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf6, buf5, primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), buf2, buf5, primals_6 class TemporalAttentionNew(nn.Module): def __init__(self, hidden_size, feat_size, bottleneck_size): super(TemporalAttentionNew, self).__init__() self.hidden_size = hidden_size self.feat_size = feat_size self.bottleneck_size = bottleneck_size self.W = nn.Linear(self.hidden_size, self.bottleneck_size, bias=False) self.U = nn.Linear(self.feat_size, self.bottleneck_size, bias=False) self.b = nn.Parameter(torch.ones(self.bottleneck_size), requires_grad=True) self.w = nn.Linear(self.bottleneck_size, 1, bias=False) def forward(self, input_0, input_1): primals_5 = self.b primals_1 = self.W.weight primals_3 = self.U.weight primals_6 = self.w.weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
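As a hedged sanity check (a sketch, not part of the generated artifact: it assumes a CUDA device and that both the eager TemporalAttention and the compiled TemporalAttentionNew above are importable in one session), the two forward paths can be compared with shared parameters:

import torch

# Hypothetical equivalence check between the eager and Inductor-compiled modules.
eager = TemporalAttention(4, 4, 4).cuda()
compiled = TemporalAttentionNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())   # identical W, U, b, w parameters
hidden = torch.rand(4, 4, 4, device='cuda')
feats = torch.rand(4, 4, 4, 4, device='cuda')
out_ref, w_ref = eager(hidden, feats)
out_new, w_new = compiled(hidden, feats)
print(torch.allclose(out_ref, out_new, atol=1e-5))
print(torch.allclose(w_ref, w_new, atol=1e-5))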
Shashwat07gupta/MSVD
TemporalAttention
false
1,066
[ "MIT" ]
0
8026557ef7681a504b5140560ec4aaad9944de2d
https://github.com/Shashwat07gupta/MSVD/tree/8026557ef7681a504b5140560ec4aaad9944de2d
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hidden_size, feat_size, bottleneck_size): super().__init__() self.hidden_size = hidden_size self.feat_size = feat_size self.bottleneck_size = bottleneck_size self.W = nn.Linear(self.hidden_size, self.bottleneck_size, bias=False) self.U = nn.Linear(self.feat_size, self.bottleneck_size, bias=False) self.b = nn.Parameter(torch.ones(self.bottleneck_size), requires_grad=True) self.w = nn.Linear(self.bottleneck_size, 1, bias=False) def forward(self, hidden, feats, masks=None): Wh = self.W(hidden) Uv = self.U(feats) Wh = Wh.unsqueeze(1).expand_as(Uv) energies = self.w(torch.tanh(Wh + Uv + self.b)) if masks is not None: energies = energies.squeeze(2) energies[~masks] = -float('inf') energies = energies.unsqueeze(2) weights = F.softmax(energies, dim=1) weighted_feats = feats * weights attn_feats = weighted_feats.sum(dim=1) return attn_feats, weights def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
FastRNNCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/tb/ctbc62iwsfjylunbfj67j7mqt6piotcv6s56drmljjg2lp7vfyhv.py # Topologically Sorted Source Nodes: [pre_comp, add_1, c, sigmoid, mul, sigmoid_1, mul_1, new_h], Original ATen: [aten.add, aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # c => tanh # mul => mul # mul_1 => mul_1 # new_h => add_2 # pre_comp => add # sigmoid => sigmoid # sigmoid_1 => sigmoid_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_6,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_7,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %tanh), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 
7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr3 + (x2), xmask) tmp11 = tl.load(in_ptr4 + (0)) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp13 * tmp5 tmp15 = tmp10 + tmp14 tl.store(in_out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [uComp], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pre_comp, add_1, c, sigmoid, mul, sigmoid_1, mul_1, new_h], Original ATen: [aten.add, aten.tanh, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_tanh_0.run(buf2, buf1, primals_5, primals_6, primals_4, primals_7, buf3, 256, grid=grid(256), stream=stream0) del buf1 del primals_5 return (buf3, primals_4, primals_6, primals_7, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = 
utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastRNNCell(RNNCell): """ FastRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. alphaInit = init for alpha, the update scalar betaInit = init for beta, the weight for previous state FastRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=- 3.0, betaInit=3.0, name='FastRNN'): super(FastRNNCell, self).__init__(input_size, hidden_size, None, update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity) self._alphaInit = alphaInit self._betaInit = betaInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1])) self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastRNN' def forward(self, input, state): if self._wRank is None: wComp = torch.matmul(input, self.W) else: wComp = torch.matmul(torch.matmul(input, self.W1), self.W2) if self._uRank is None: uComp = torch.matmul(state, self.U) else: uComp = torch.matmul(torch.matmul(state, self.U1), self.U2) pre_comp = wComp + uComp c = gen_nonlinearity(pre_comp + self.bias_update, self. _update_nonlinearity) new_h = torch.sigmoid(self.beta) * state + torch.sigmoid(self.alpha ) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_update]) Vars.extend([self.alpha, self.beta]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
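Two notes on the cell above. First, the 'relu' branch of gen_nonlinearity calls torch.relu(A, 0.0), which would raise a TypeError at runtime since torch.relu takes a single tensor; the code is reproduced verbatim from the source repo, and only the default 'tanh' path is exercised here. Second, a minimal usage sketch of the full-rank FastRNN update h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*tanh(W x_t + U h_{t-1} + b), with shapes taken from get_inputs():

import torch

# Hypothetical smoke test for FastRNNCell (full-rank path, wRank=uRank=None).
cell = FastRNNCell(input_size=4, hidden_size=4)
x = torch.rand(4, 4, 4, 4)   # input
h = torch.rand(4, 4, 4, 4)   # previous hidden state
new_h = cell(x, h)
print(new_h.shape)           # torch.Size([4, 4, 4, 4])
# alphaInit=-3.0, betaInit=3.0 => sigmoid(beta) ~ 0.95, so new_h starts close to h.
print(len(cell.getVars()))   # 5 tensors: W, U, bias_update, alpha, beta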
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp9 = tl.load(in_ptr3 + x2, xmask) tmp11 = tl.load(in_ptr4 + 0) tmp12 = tl.broadcast_to(tmp11, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp8 = tl.sigmoid(tmp7) tmp10 = tmp8 * tmp9 tmp13 = tl.sigmoid(tmp12) tmp14 = tmp13 * tmp5 tmp15 = tmp10 + tmp14 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_tanh_0[grid(256)](buf2, buf1, primals_5, primals_6, primals_4, primals_7, buf3, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf1 del primals_5 return buf3, primals_4, primals_6, primals_7, buf2, reinterpret_tensor( primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. 
ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastRNNCellNew(RNNCell): """ FastRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two 
matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. alphaInit = init for alpha, the update scalar betaInit = init for beta, the weight for previous state FastRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=- 3.0, betaInit=3.0, name='FastRNN'): super(FastRNNCellNew, self).__init__(input_size, hidden_size, None, update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity) self._alphaInit = alphaInit self._betaInit = betaInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1])) self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastRNN' def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_update]) Vars.extend([self.alpha, self.beta]) return Vars def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.U primals_5 = self.bias_update primals_6 = self.alpha primals_7 = self.beta primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ShishirPatil/EdgeML-1
FastRNNCell
false
1,067
[ "MIT" ]
0
cbba9f8b989e545788427c004eb8450e7e4c1a21
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super().__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(to # ... truncated (>4000 chars) for memory efficiency
Downsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): """ Returns number of trainable parameters of the module. """ num_params = 0 for name, param in self.named_parameters(): if param.requires_grad: num_params += np.prod(param.detach().cpu().numpy().shape) return num_params def relocate_input(self, x: 'list'): """ Relocates provided tensors to the same device set for the module. """ device = next(self.parameters()).device for i in range(len(x)): if isinstance(x[i], torch.Tensor) and x[i].device != device: x[i] = x[i] return x class Downsample(BaseModule): def __init__(self, dim): super(Downsample, self).__init__() self.conv = torch.nn.Conv2d(dim, dim, 3, 2, 1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
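A short shape-check sketch for the Downsample module above. Conv2d(dim, dim, 3, 2, 1) maps a spatial size H to floor((H + 2 - 3)/2) + 1, so a 4x4 input becomes 2x2, which is exactly the (4, 4, 2, 2) buffer the compiled call() below asserts:

import torch

# Hypothetical shape check: a stride-2, padding-1, 3x3 conv halves spatial dims.
down = Downsample(dim=4)
x = torch.rand(4, 4, 4, 4)
y = down(x)
print(y.shape)  # torch.Size([4, 4, 2, 2])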
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): """ Returns number of trainable parameters of the module. """ num_params = 0 for name, param in self.named_parameters(): if param.requires_grad: num_params += np.prod(param.detach().cpu().numpy().shape) return num_params def relocate_input(self, x: 'list'): """ Relocates provided tensors to the same device set for the module. """ device = next(self.parameters()).device for i in range(len(x)): if isinstance(x[i], torch.Tensor) and x[i].device != device: x[i] = x[i] return x class DownsampleNew(BaseModule): def __init__(self, dim): super(DownsampleNew, self).__init__() self.conv = torch.nn.Conv2d(dim, dim, 3, 2, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Sobsz/uberduck-ml-dev
Downsample
false
1,068
[ "Apache-2.0" ]
0
f099238f6f2e3f600d72d89dea3c883c59d91387
https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387
import torch import numpy as np class BaseModule(torch.nn.Module): def __init__(self): super().__init__() @property def nparams(self): """ Returns number of trainable parameters of the module. """ num_params = 0 for name, param in self.named_parameters(): if param.requires_grad: num_params += np.prod(param.detach().cpu().numpy().shape) return num_params def relocate_input(self, x: 'list'): """ Relocates provided tensors to the same device set for the module. """ device = next(self.parameters()).device for i in range(len(x)): if isinstance(x[i], torch.Tensor) and x[i].device != device: x[i] = x[i] return x class Model(BaseModule): def __init__(self, dim): super().__init__() self.conv = torch.nn.Conv2d(dim, dim, 3, 2, 1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
AddFunction
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/zi/czioyfiql36jvbru3amu3iggyuvnn5c4pypwuaiss36muc2jqtqb.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, 
arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class AddFunction(nn.Module): def __init__(self): super(AddFunction, self).__init__() def forward(self, x, y): return x + y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class AddFunctionNew(nn.Module): def __init__(self): super(AddFunctionNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ShounoLab/res-net-interpretation-open
AddFunction
false
1,069
[ "MIT" ]
0
282dc0ae261467ee1866996416149959db216c02
https://github.com/ShounoLab/res-net-interpretation-open/tree/282dc0ae261467ee1866996416149959db216c02
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return x + y def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
WeightedCE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hl/chlfdbgpszjgvc5lbbjy2patr43syrsegknoj2ftqczeybmrnw76.py # Topologically Sorted Source Nodes: [loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean] # Source node to ATen node mapping: # loss => exp, log, mul, neg, sub_1, sum_1, sum_2 # mean => mean # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {}) triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp31 = 64.0 tmp32 = tmp30 / tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.mean] triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.nn.functional as F class WeightedCE(nn.Module): """Mask weighted multi-class cross-entropy (CE) loss. """ def __init__(self): super().__init__() def forward(self, pred, target, weight_mask=None): loss = F.cross_entropy(pred, target, reduction='none') if weight_mask is not None: loss = loss * weight_mask return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp14 = tmp12 * tmp13 tmp15 = tmp2 - tmp11 tmp17 = tmp15 * tmp16 tmp18 = tmp14 + tmp17 tmp19 = tmp5 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp23 = tmp8 - tmp11 tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp27 = -tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = tl.sum(tmp28, 1)[:, None] tmp31 = 64.0 tmp32 = tmp30 / tmp31 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf2, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class WeightedCENew(nn.Module): """Mask weighted multi-class cross-entropy (CE) loss. 
""" def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Shray64/pytorch_connectomics
WeightedCE
false
1,070
[ "MIT" ]
0
d6c814f11ac2f8418ede5ae220a93016f50214fc
https://github.com/Shray64/pytorch_connectomics/tree/d6c814f11ac2f8418ede5ae220a93016f50214fc
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F


class Model(nn.Module):
    """Mask weighted multi-class cross-entropy (CE) loss.
    """

    def __init__(self):
        super().__init__()

    def forward(self, pred, target, weight_mask=None):
        loss = F.cross_entropy(pred, target, reduction='none')
        if weight_mask is not None:
            loss = loss * weight_mask
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
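Note that F.cross_entropy dispatches on the target's dtype and shape: because get_inputs feeds a floating-point target shaped like pred, dim 1 is interpreted as per-class probabilities rather than class indices, which is exactly the weight-times-log-softmax form the fused kernels compute. A quick eager check of that identity (requires a PyTorch version with soft-target cross-entropy support):

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)  # treated as class probabilities along dim 1
manual = -(target * F.log_softmax(pred, dim=1)).sum(dim=1)
assert torch.allclose(F.cross_entropy(pred, target, reduction='none'), manual, atol=1e-6)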
PartialConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/p3/cp3qleddjiuuytozrtebx5pzf2ycpwtw4mkq2jsx7qqswymv2bm6.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/fj/cfjzvvqpec3bkltw3iqoicgjjjbjt5sjrhrkdgaltmhp7rqzs5eb.py # Topologically Sorted Source Nodes: [output, no_update_holes, mask_sum, sub, mul_1, truediv, output_pre, output_1, new_mask, new_mask_1], Original ATen: [aten.convolution, aten.eq, aten.masked_fill, aten.sub, aten.mul, aten.div, aten.add, aten.ones_like] # Source node to ATen node mapping: # mask_sum => full_default, where # mul_1 => mul_1 # new_mask => full_default_2 # new_mask_1 => where_2 # no_update_holes => eq # output => convolution # output_1 => full_default_1, where_1 # output_pre => add # sub => sub # truediv => div # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%convolution_1, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %convolution_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %expand), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 64), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %where), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %expand), kwargs = {}) # %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %add), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %full_default_2), kwargs = {}) triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1 = async_compile.triton('triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp5 - tmp4 tmp7 = 64.0 tmp8 = tmp6 * tmp7 tmp9 = 1.0 tmp10 = tl.where(tmp2, tmp9, tmp0) tmp11 = tmp8 / tmp10 tmp12 = tmp11 + tmp4 tmp13 = tl.where(tmp2, tmp1, tmp12) tmp14 = tl.where(tmp2, tmp1, tmp9) tl.store(in_out_ptr0 + (x2), tmp13, xmask) tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) # Topologically Sorted Source Nodes: [output_mask], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) del primals_2 del primals_5 buf3 = buf1; del buf1 # reuse buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [output, no_update_holes, mask_sum, sub, mul_1, truediv, output_pre, output_1, new_mask, new_mask_1], Original ATen: [aten.convolution, aten.eq, aten.masked_fill, aten.sub, aten.mul, aten.div, aten.add, aten.ones_like] triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1.run(buf3, buf2, primals_4, buf4, 16, grid=grid(16), stream=stream0) del primals_4 return (buf3, buf4, primals_3, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
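The scalar pipeline in the fused kernel mirrors the renormalization in the source module: tmp5/tmp6 add and then subtract the bias (the extern convolution runs with bias=None), tmp8 scales by the constant 64.0, which is slide_winsize = in_channels * kernel_size * kernel_size = 4 * 4 * 4 folded in at trace time, tmp11 divides by the hole-clamped mask sum, and tmp13/tmp14 zero out hole positions. A small eager check that the frozen all-ones mask_conv really yields that count on a fully valid window:

import torch
import torch.nn as nn

mask_conv = nn.Conv2d(4, 4, 4, bias=False)
nn.init.constant_(mask_conv.weight, 1.0)
with torch.no_grad():
    mask_sum = mask_conv(torch.ones(1, 4, 4, 4))  # shape (1, 4, 1, 1)
print(mask_sum)  # every entry is 64.0, so fully valid windows are left unscaled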
import math import torch import torch.nn as nn def weights_init(init_type='gaussian'): def init_fun(m): classname = m.__class__.__name__ if (classname.find('Conv') == 0 or classname.find('Linear') == 0 ) and hasattr(m, 'weight'): if init_type == 'gaussian': nn.init.normal_(m.weight, 0.0, 0.02) elif init_type == 'xavier': nn.init.xavier_normal_(m.weight, gain=math.sqrt(2)) elif init_type == 'kaiming': nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') elif init_type == 'orthogonal': nn.init.orthogonal_(m.weight, gain=math.sqrt(2)) elif init_type == 'default': pass else: assert 0, 'Unsupported initialization: {}'.format(init_type) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0.0) return init_fun class PartialConv(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super().__init__() self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, False) self.input_conv.apply(weights_init('kaiming')) self.slide_winsize = in_channels * kernel_size * kernel_size torch.nn.init.constant_(self.mask_conv.weight, 1.0) for param in self.mask_conv.parameters(): param.requires_grad = False def forward(self, input, mask): output = self.input_conv(input * mask) if self.input_conv.bias is not None: output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as( output) else: output_bias = torch.zeros_like(output) with torch.no_grad(): output_mask = self.mask_conv(mask) no_update_holes = output_mask == 0 mask_sum = output_mask.masked_fill_(no_update_holes, 1.0) output_pre = (output - output_bias ) * self.slide_winsize / mask_sum + output_bias output = output_pre.masked_fill_(no_update_holes, 0.0) new_mask = torch.ones_like(output) new_mask = new_mask.masked_fill_(no_update_holes, 0.0) return output, new_mask def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1( in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp5 - tmp4 tmp7 = 64.0 tmp8 = tmp6 * tmp7 tmp9 = 1.0 tmp10 = tl.where(tmp2, tmp9, tmp0) tmp11 = tmp8 / tmp10 tmp12 = tmp11 + tmp4 tmp13 = tl.where(tmp2, tmp1, tmp12) tmp14 = tl.where(tmp2, tmp1, tmp9) tl.store(in_out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) del primals_2 del primals_5 buf3 = buf1 del buf1 buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1[ grid(16)](buf3, buf2, primals_4, buf4, 16, XBLOCK=16, num_warps =1, num_stages=1) del primals_4 return buf3, buf4, primals_3, buf0, buf2 def weights_init(init_type='gaussian'): def init_fun(m): classname = m.__class__.__name__ if (classname.find('Conv') == 0 or classname.find('Linear') == 0 ) and hasattr(m, 'weight'): if init_type == 'gaussian': nn.init.normal_(m.weight, 0.0, 0.02) elif init_type == 'xavier': nn.init.xavier_normal_(m.weight, gain=math.sqrt(2)) elif init_type == 'kaiming': nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') elif init_type == 'orthogonal': nn.init.orthogonal_(m.weight, gain=math.sqrt(2)) elif init_type == 'default': pass else: assert 0, 'Unsupported initialization: {}'.format(init_type) if hasattr(m, 
'bias') and m.bias is not None: nn.init.constant_(m.bias, 0.0) return init_fun class PartialConvNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super().__init__() self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, False) self.input_conv.apply(weights_init('kaiming')) self.slide_winsize = in_channels * kernel_size * kernel_size torch.nn.init.constant_(self.mask_conv.weight, 1.0) for param in self.mask_conv.parameters(): param.requires_grad = False def forward(self, input_0, input_1): primals_1 = self.input_conv.weight primals_4 = self.input_conv.bias primals_2 = self.mask_conv.weight primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
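A hypothetical smoke test for the compiled wrapper above, assuming a CUDA device is available (the call path hard-codes cuda:0); the shapes follow the (4, 4, 4, 4) inputs asserted in call, and the variable names are only illustrative:

import torch

pc = PartialConvNew(4, 4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
mask = torch.ones(4, 4, 4, 4, device='cuda')
out, new_mask = pc(x, mask)         # both (4, 4, 1, 1): one 4x4 window per image
assert bool((new_mask == 1).all())  # an all-ones mask leaves no holes to propagate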
ShiraLightricks/3d-photo-inpainting
PartialConv
false
1,071
[ "MIT" ]
0
c42ac41576690b765e50f5281ddbfb58439ff36d
https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d
import math
import torch
import torch.nn as nn


def weights_init(init_type='gaussian'):

    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0
            ) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                nn.init.normal_(m.weight, 0.0, 0.02)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, 'Unsupported initialization: {}'.format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
    return init_fun


class Model(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
        padding=0, dilation=1, groups=1, bias=True):
        super().__init__()
        self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, bias)
        self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, False)
        self.input_conv.apply(weights_init('kaiming'))
        self.slide_winsize = in_channels * kernel_size * kernel_size
        torch.nn.init.constant_(self.mask_conv.weight, 1.0)
        for param in self.mask_conv.parameters():
            param.requires_grad = False

    def forward(self, input, mask):
        output = self.input_conv(input * mask)
        if self.input_conv.bias is not None:
            output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(output)
        else:
            output_bias = torch.zeros_like(output)
        with torch.no_grad():
            output_mask = self.mask_conv(mask)
        no_update_holes = output_mask == 0
        mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
        output_pre = (output - output_bias
            ) * self.slide_winsize / mask_sum + output_bias
        output = output_pre.masked_fill_(no_update_holes, 0.0)
        new_mask = torch.ones_like(output)
        new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
        return output, new_mask


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 4]
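The hole path is worth spelling out: wherever the mask convolution response is zero, the denominator is clamped to 1.0 only to avoid a division by zero, and both the output and the propagated mask are then zeroed at those positions. A minimal sketch, assuming the Model class above:

import torch

pc = Model(4, 4, 4)
x = torch.rand(4, 4, 4, 4)
out, new_mask = pc(x, torch.zeros_like(x))  # every window is a hole
assert out.abs().sum() == 0 and new_mask.sum() == 0  # nothing is filled or unmasked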
ProtoNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/dj/cdjhwfxa4rmj7sf4p4wvsuo75o54dwfcioekxuix47trjvtclo2h.py # Topologically Sorted Source Nodes: [l2sim, l2sim_1, l2sim_2], Original ATen: [aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # l2sim => sub # l2sim_1 => pow_1 # l2sim_2 => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vq/cvqpmuazxnwnsptaxlt2b7ipl2pprxtdxzwiesxonokjn253gipy.py # Topologically Sorted Source Nodes: [gammal2sim, M, y, y_1], Original ATen: [aten.mul, aten.exp, aten.sum] # Source node to ATen node mapping: # M => exp # gammal2sim => mul # y => mul_1 # y_1 => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -16), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %exp), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) triton_poi_fused_exp_mul_sum_1 = async_compile.triton('triton_poi_fused_exp_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 
= tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = -16.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp8 = tmp7 * tmp2 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp13 * tmp2 tmp15 = tl_math.exp(tmp14) tmp16 = tmp12 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tl_math.exp(tmp20) tmp22 = tmp18 * tmp21 tmp23 = tmp17 + tmp22 tl.store(out_ptr0 + (x2), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [WX], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [l2sim, l2sim_1, l2sim_2], Original ATen: [aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_pow_sub_sum_0.run(primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [gammal2sim, M, y, y_1], Original ATen: [aten.mul, aten.exp, aten.sum] triton_poi_fused_exp_mul_sum_1.run(primals_3, buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor(primals_4, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.onnx from itertools import product as product class ProtoNN(nn.Module): def __init__(self, inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma, W=None, B=None, Z=None): """ Forward computation graph for ProtoNN. inputDimension: Input data dimension or feature dimension. projectionDimension: hyperparameter numPrototypes: hyperparameter numOutputLabels: The number of output labels or classes W, B, Z: Numpy matrices that can be used to initialize projection matrix(W), prototype matrix (B) and prototype labels matrix (B). Expected Dimensions: W inputDimension (d) x projectionDimension (d_cap) B projectionDimension (d_cap) x numPrototypes (m) Z numOutputLabels (L) x numPrototypes (m) """ super(ProtoNN, self).__init__() self.__d = inputDimension self.__d_cap = projectionDimension self.__m = numPrototypes self.__L = numOutputLabels self.W, self.B, self.Z = None, None, None self.gamma = gamma self.__validInit = False self.__initWBZ(W, B, Z) self.__validateInit() def __validateInit(self): self.__validinit = False errmsg = 'Dimensions mismatch! Should be W[d, d_cap]' errmsg += ', B[d_cap, m] and Z[L, m]' d, d_cap, m, L, _ = self.getHyperParams() assert self.W.shape[0] == d, errmsg assert self.W.shape[1] == d_cap, errmsg assert self.B.shape[0] == d_cap, errmsg assert self.B.shape[1] == m, errmsg assert self.Z.shape[0] == L, errmsg assert self.Z.shape[1] == m, errmsg self.__validInit = True def __initWBZ(self, inW, inB, inZ): if inW is None: self.W = torch.randn([self.__d, self.__d_cap]) self.W = nn.Parameter(self.W) else: self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32))) if inB is None: self.B = torch.randn([self.__d_cap, self.__m]) self.B = nn.Parameter(self.B) else: self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32))) if inZ is None: self.Z = torch.randn([self.__L, self.__m]) self.Z = nn.Parameter(self.Z) else: self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32))) def getHyperParams(self): """ Returns the model hyperparameters: [inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma] """ d = self.__d dcap = self.__d_cap m = self.__m L = self.__L return d, dcap, m, L, self.gamma def getModelMatrices(self): """ Returns model matrices, which can then be evaluated to obtain corresponding numpy arrays. These can then be exported as part of other implementations of ProtonNN, for instance a C++ implementation or pure python implementation. Returns [ProjectionMatrix (W), prototypeMatrix (B), prototypeLabelsMatrix (Z), gamma] """ return self.W, self.B, self.Z, self.gamma def forward(self, X): """ This method is responsible for construction of the forward computation graph. The end point of the computation graph, or in other words the output operator for the forward computation is returned. X: Input of shape [-1, inputDimension] returns: The forward computation outputs, self.protoNNOut """ assert self.__validInit is True, 'Initialization failed!' 
W, B, Z, gamma = self.W, self.B, self.Z, self.gamma WX = torch.matmul(X, W) dim = [-1, WX.shape[1], 1] WX = torch.reshape(WX, dim) dim = [1, B.shape[0], -1] B_ = torch.reshape(B, dim) l2sim = B_ - WX l2sim = torch.pow(l2sim, 2) l2sim = torch.sum(l2sim, dim=1, keepdim=True) self.l2sim = l2sim gammal2sim = -1 * gamma * gamma * l2sim M = torch.exp(gammal2sim) dim = [1] + list(Z.shape) Z_ = torch.reshape(Z, dim) y = Z_ * M y = torch.sum(y, dim=2) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputDimension': 4, 'projectionDimension': 4, 'numPrototypes': 4, 'numOutputLabels': 4, 'gamma': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_exp_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = -16.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp8 = tmp7 * tmp2 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp13 * tmp2 tmp15 = tl_math.exp(tmp14) tmp16 = tmp12 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tl_math.exp(tmp20) tmp22 = tmp18 * tmp21 tmp23 = tmp17 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_pow_sub_sum_0[grid(256)](primals_2, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_exp_mul_sum_1[grid(256)](primals_3, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor( primals_4, (4, 64), (1, 4), 0) class ProtoNNNew(nn.Module): def __init__(self, inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma, W=None, B=None, Z=None): """ Forward computation graph for ProtoNN. inputDimension: Input data dimension or feature dimension. projectionDimension: hyperparameter numPrototypes: hyperparameter numOutputLabels: The number of output labels or classes W, B, Z: Numpy matrices that can be used to initialize projection matrix(W), prototype matrix (B) and prototype labels matrix (B). Expected Dimensions: W inputDimension (d) x projectionDimension (d_cap) B projectionDimension (d_cap) x numPrototypes (m) Z numOutputLabels (L) x numPrototypes (m) """ super(ProtoNNNew, self).__init__() self.__d = inputDimension self.__d_cap = projectionDimension self.__m = numPrototypes self.__L = numOutputLabels self.W, self.B, self.Z = None, None, None self.gamma = gamma self.__validInit = False self.__initWBZ(W, B, Z) self.__validateInit() def __validateInit(self): self.__validinit = False errmsg = 'Dimensions mismatch! Should be W[d, d_cap]' errmsg += ', B[d_cap, m] and Z[L, m]' d, d_cap, m, L, _ = self.getHyperParams() assert self.W.shape[0] == d, errmsg assert self.W.shape[1] == d_cap, errmsg assert self.B.shape[0] == d_cap, errmsg assert self.B.shape[1] == m, errmsg assert self.Z.shape[0] == L, errmsg assert self.Z.shape[1] == m, errmsg self.__validInit = True def __initWBZ(self, inW, inB, inZ): if inW is None: self.W = torch.randn([self.__d, self.__d_cap]) self.W = nn.Parameter(self.W) else: self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32))) if inB is None: self.B = torch.randn([self.__d_cap, self.__m]) self.B = nn.Parameter(self.B) else: self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32))) if inZ is None: self.Z = torch.randn([self.__L, self.__m]) self.Z = nn.Parameter(self.Z) else: self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32))) def getHyperParams(self): """ Returns the model hyperparameters: [inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma] """ d = self.__d dcap = self.__d_cap m = self.__m L = self.__L return d, dcap, m, L, self.gamma def getModelMatrices(self): """ Returns model matrices, which can then be evaluated to obtain corresponding numpy arrays. These can then be exported as part of other implementations of ProtonNN, for instance a C++ implementation or pure python implementation. Returns [ProjectionMatrix (W), prototypeMatrix (B), prototypeLabelsMatrix (Z), gamma] """ return self.W, self.B, self.Z, self.gamma def forward(self, input_0): primals_1 = self.W primals_2 = self.B primals_3 = self.Z primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
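Taken together, the two fused kernels implement the ProtoNN score y[n, l] = sum_m Z[l, m] * exp(-gamma**2 * ||B[:, m] - (x_n @ W)||**2); the literal -16.0 in triton_poi_fused_exp_mul_sum_1 is -gamma**2 with gamma = 4 baked in at trace time, so the compiled module is only valid for that hyperparameter. A minimal eager sketch of the same score, assuming W is (d, d_cap), B is (d_cap, m), Z is (L, m), and X is a flattened input of shape (N, d):

import torch

def protonn_score(X, W, B, Z, gamma):
    WX = X @ W                                                 # (N, d_cap)
    d2 = ((B.unsqueeze(0) - WX.unsqueeze(2)) ** 2).sum(dim=1)  # (N, m) squared distances
    return torch.exp(-gamma * gamma * d2) @ Z.t()              # (N, L) label scores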
ShishirPatil/EdgeML-1
ProtoNN
false
1,072
[ "MIT" ]
0
cbba9f8b989e545788427c004eb8450e7e4c1a21
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
import torch import numpy as np import torch.nn as nn import torch.onnx from itertools import product as product class Model(nn.Module): def __init__(self, inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma, W=None, B=None, Z=None): """ Forward computation graph for ProtoNN. inputDimension: Input data dimension or feature dimension. projectionDimension: hyperparameter numPrototypes: hyperparameter numOutputLabels: The number of output labels or classes W, B, Z: Numpy matrices that can be used to initialize projection matrix(W), prototype matrix (B) and prototype labels matrix (B). Expected Dimensions: W inputDimension (d) x projectionDimension (d_cap) B projectionDimension (d_cap) x numPrototypes (m) Z numOutputLabels (L) x numPrototypes (m) """ super().__init__() self.__d = inputDimension self.__d_cap = projectionDimension self.__m = numPrototypes self.__L = numOutputLabels self.W, self.B, self.Z = None, None, None self.gamma = gamma self.__validInit = False self.__initWBZ(W, B, Z) self.__validateInit() def __validateInit(self): self.__validinit = False errmsg = 'Dimensions mismatch! Should be W[d, d_cap]' errmsg += ', B[d_cap, m] and Z[L, m]' d, d_cap, m, L, _ = self.getHyperParams() assert self.W.shape[0] == d, errmsg assert self.W.shape[1] == d_cap, errmsg assert self.B.shape[0] == d_cap, errmsg assert self.B.shape[1] == m, errmsg assert self.Z.shape[0] == L, errmsg assert self.Z.shape[1] == m, errmsg self.__validInit = True def __initWBZ(self, inW, inB, inZ): if inW is None: self.W = torch.randn([self.__d, self.__d_cap]) self.W = nn.Parameter(self.W) else: self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32))) if inB is None: self.B = torch.randn([self.__d_cap, self.__m]) self.B = nn.Parameter(self.B) else: self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32))) if inZ is None: self.Z = torch.randn([self.__L, self.__m]) self.Z = nn.Parameter(self.Z) else: self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32))) def getHyperParams(self): """ Returns the model hyperparameters: [inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma] """ d = self.__d dcap = self.__d_cap m = self.__m L = self.__L return d, dcap, m, L, self.gamma def getModelMatrices(self): """ Returns model matrices, which can then be evaluated to obtain corresponding numpy arrays. These can then be exported as part of other implementations of ProtonNN, for instance a C++ implementation or pure python implementation. Returns [ProjectionMatrix (W), prototypeMatrix (B), prototypeLabelsMatrix (Z), gamma] """ return self.W, self.B, self.Z, self.gamma def forward(self, X): """ This method is responsible for construction of the forward computation graph. The end point of the computation graph, or in other words the output operator for the forward computation is returned. X: Input of shape [-1, inputDimension] returns: The forward computation outputs, self.protoNNOut """ assert self.__validInit is True, 'Initialization failed!' W, B, Z, gamma = self.W, self.B, self.Z, self.gamma WX = torch.matmul(X, W) dim = [-1, WX.shape[1], 1] WX = torch.reshape(WX, dim) dim = [1, B.shape[0], -1] B_ = torch.reshape(B, d # ... truncated (>4000 chars) for memory efficiency
GRULRCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/3l/c3lvfnzdm4j54qmpqlq4wtibe7wlwzukm4xfgt3bw5bdnfre2jng.py # Topologically Sorted Source Nodes: [pre_comp1, add_2, r, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.sigmoid_backward] # Source node to ATen node mapping: # add_2 => add_2 # mul => mul # pre_comp1 => add # r => sigmoid # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_7), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_8), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_6), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp5 tmp10 = tmp5 * tmp9 tl.store(out_ptr0 + (x2), tmp7, xmask) tl.store(out_ptr1 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lz/clzjpvamptc4sf4uhsz3jl6umhb36eb2okmo444rgajkmwalgs3q.py # Topologically Sorted Source Nodes: [pre_comp2, add_3, z, pre_comp3, add_5, c, mul_1, sub, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_3 => add_3 # add_5 => add_5 # c => tanh # mul_1 => mul_1 # mul_2 => mul_2 # new_h => add_6 # pre_comp2 => add_1 # pre_comp3 => add_4 # sub => sub # z => sigmoid_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %view_9), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_9), kwargs = {}) # %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_3,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_11), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %primals_11), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_5,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_6), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + (x2), xmask) tmp7 = tl.load(in_ptr2 + (x2), xmask) tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + (x2), tmp5, xmask) tl.store(in_out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [uComp1], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_5, out=buf3) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [uComp2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_7, out=buf4) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pre_comp1, add_2, r, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.sigmoid_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0.run(buf0, buf3, primals_8, primals_6, buf6, buf10, 256, grid=grid(256), stream=stream0) del primals_8 buf7 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), primals_10, out=buf7) buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [pre_comp2, add_3, z, pre_comp3, add_5, c, mul_1, sub, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub] triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf5, buf8, buf4, primals_9, buf7, primals_11, primals_6, buf9, 256, grid=grid(256), stream=stream0) del buf4 del buf7 del primals_11 del primals_9 return (buf9, primals_6, buf5, buf8, reinterpret_tensor(buf6, (4, 64), (1, 4), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), buf10, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
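For reference, a minimal eager-mode sketch (illustrative names, not part of the generated module) of what the two fused pointwise kernels above compute elementwise; u3r stands for the matmul(r * state, U3) result produced by the extern mm that runs between the two kernel launches:

import torch

def fused_gru_pointwise_reference(w1, u1, w2, u2, w3, u3r, b_r, b_z, b_c, state):
    # kernel 0: reset gate r, the gated state fed into the U3 matmul,
    # and the r * (1 - r) term saved for sigmoid's backward pass
    r = torch.sigmoid(w1 + u1 + b_r)
    gated_state, sigmoid_bwd = r * state, r * (1.0 - r)
    # kernel 1: update gate z, candidate c, and the new hidden state
    z = torch.sigmoid(w2 + u2 + b_z)
    c = torch.tanh(w3 + u3r + b_c)
    return z * state + (1.0 - z) * c, gated_state, sigmoid_bwd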
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] =
utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class GRULRCell(RNNCell): """ GRU LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 4 matrices if not None else creates 3 matrices) uRank = rank of U matrix (creates 4 matrices if not None else creates 3 matrices) GRU architecture and compression techniques are found in GRU(LINK) paper Basic architecture is like: r_t = gate_nl(W1x_t + U1h_{t-1} + B_r) z_t = gate_nl(W2x_t + U2h_{t-1} + B_g) h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='GRULR'): super(GRULRCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_r = nn.Parameter(torch.ones([1, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'GRULR' def forward(self, input, state): if self._wRank is None: wComp1 = torch.matmul(input, self.W1) wComp2 = torch.matmul(input, self.W2) wComp3 = torch.matmul(input, self.W3) else: wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1) wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2) wComp3 = torch.matmul(torch.matmul(input, self.W), self.W3) if self._uRank is None: uComp1 = torch.matmul(state, self.U1) uComp2 = torch.matmul(state, self.U2) else: uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1) uComp2 = 
torch.matmul(torch.matmul(state, self.U), self.U2) pre_comp1 = wComp1 + uComp1 pre_comp2 = wComp2 + uComp2 r = gen_nonlinearity(pre_comp1 + self.bias_r, self._gate_nonlinearity) z = gen_nonlinearity(pre_comp2 + self.bias_gate, self. _gate_nonlinearity) if self._uRank is None: pre_comp3 = wComp3 + torch.matmul(r * state, self.U3) else: pre_comp3 = wComp3 + torch.matmul(torch.matmul(r * state, self. U), self.U3) c = gen_nonlinearity(pre_comp3 + self.bias_update, self. _update_nonlinearity) new_h = z * state + (1.0 - z) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 3: Vars.extend([self.W1, self.W2, self.W3]) else: Vars.extend([self.W, self.W1, self.W2, self.W3]) if self._num_U_matrices == 3: Vars.extend([self.U1, self.U2, self.U3]) else: Vars.extend([self.U, self.U1, self.U2, self.U3]) Vars.extend([self.bias_r, self.bias_gate, self.bias_update]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
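A hedged usage sketch for the cell defined above, driven by the toy shapes that get_inputs()/get_init_inputs() advertise (default sigmoid/tanh nonlinearities, full-rank variant since wRank and uRank are None):

import torch
cell = GRULRCell(input_size=4, hidden_size=4)
inp, state = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
new_h = cell(inp, state)            # one GRU step: z * state + (1 - z) * c
assert new_h.shape == state.shape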
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp5 tmp10 = tmp5 * tmp9 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_5, out=buf3) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_7, out=buf4) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0, buf3, primals_8, primals_6, buf6, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf7 = buf3 del buf3 extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), primals_10, out=buf7) buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf5, buf8, buf4, primals_9, buf7, primals_11, primals_6, buf9, 256, XBLOCK =256, num_warps=4, num_stages=1) del buf4 del buf7 del primals_11 del primals_9 return buf9, primals_6, buf5, buf8, reinterpret_tensor(buf6, (4, 64), ( 1, 4), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0 ), buf10, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class GRULRCellNew(RNNCell): """ GRU LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 4 matrices if not None else creates 3 matrices) uRank = rank of U matrix (creates 4 matrices if not None else creates 3 matrices) GRU architecture and compression techniques are found in GRU(LINK) paper Basic architecture is like: r_t = gate_nl(W1x_t + U1h_{t-1} + B_r) z_t = gate_nl(W2x_t + U2h_{t-1} + B_g) h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='GRULR'): super(GRULRCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if 
uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_r = nn.Parameter(torch.ones([1, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'GRULR' def getVars(self): Vars = [] if self._num_W_matrices == 3: Vars.extend([self.W1, self.W2, self.W3]) else: Vars.extend([self.W, self.W1, self.W2, self.W3]) if self._num_U_matrices == 3: Vars.extend([self.U1, self.U2, self.U3]) else: Vars.extend([self.U, self.U1, self.U2, self.U3]) Vars.extend([self.bias_r, self.bias_gate, self.bias_update]) return Vars def forward(self, input_0, input_1): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.W3 primals_5 = self.U1 primals_7 = self.U2 primals_10 = self.U3 primals_8 = self.bias_r primals_9 = self.bias_gate primals_11 = self.bias_update primals_2 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
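Assuming both classes are importable together and a CUDA device is available (the compiled call() pins cuda:0), the eager cell and the Inductor-compiled cell should agree numerically once their parameters are shared; a minimal sketch of that check:

import torch
eager, compiled = GRULRCell(4, 4).cuda(), GRULRCellNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())   # identical parameter names/shapes
x = torch.rand(4, 4, 4, 4, device='cuda')
h = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x, h), compiled(x, h))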
ShishirPatil/EdgeML-1
GRULRCell
false
1,073
[ "MIT" ]
0
cbba9f8b989e545788427c004eb8450e7e4c1a21
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super().__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(to # ... truncated (>4000 chars) for memory efficiency
Connect2Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : 
[num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mq/cmqqbqcbxlzvej3i4srpbdfgxirnooh2wgnwtdxbgyt3e2d762nv.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_7,), kwargs = {}) triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 1024, grid=grid(1024), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf2 # reuse buf9 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 1024, grid=grid(1024), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [action_logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.mm(reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_3.run(buf8, primals_9, 64, grid=grid(64), stream=stream0) del primals_9 return (buf7, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(buf3, (64, 16), (16, 1), 0), buf7, buf8, primals_8, primals_6, buf9, primals_4, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
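The pair of _softmax kernels above implement the standard two-pass, numerically stable softmax over dim=1: the first pass subtracts the per-slice maximum before exponentiating, the second normalizes by the sum of exponentials. An eager sketch of the same computation:

import torch

def stable_softmax_dim1(x):
    m = x.amax(dim=1, keepdim=True)         # pass 1: running max (amax)
    e = torch.exp(x - m)                    # shifted exponentials
    return e / e.sum(dim=1, keepdim=True)   # pass 2: normalize

x = torch.randn(4, 4, 4, 4)
torch.testing.assert_close(stable_softmax_dim1(x), torch.softmax(x, dim=1))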
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class Connect2Model(nn.Module): def __init__(self, board_size, action_size, device): super(Connect2Model, self).__init__() self.device = device self.size = board_size self.action_size = action_size self.fc1 = nn.Linear(in_features=self.size, out_features=16) self.fc2 = nn.Linear(in_features=16, out_features=16) self.action_head = nn.Linear(in_features=16, out_features=self. action_size) self.value_head = nn.Linear(in_features=16, out_features=1) self def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) action_logits = self.action_head(x) value_logit = self.value_head(x) return F.softmax(action_logits, dim=1), torch.tanh(value_logit) def predict(self, board): board = torch.FloatTensor(board.astype(np.float32)) board = board.view(1, self.size) self.eval() with torch.no_grad(): pi, v = self.forward(board) return pi.data.cpu().numpy()[0], v.data.cpu().numpy()[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'board_size': 4, 'action_size': 4, 'device': 0}]
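A usage sketch for the model above (CPU is fine here; the stored device is never used by forward, and predict reshapes the board to (1, board_size)):

import numpy as np
model = Connect2Model(board_size=4, action_size=4, device='cpu')
pi, v = model.predict(np.zeros(4))
print(pi.shape, v.shape)   # (4,) action distribution, (1,) tanh-squashed value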
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = libdevice.tanh(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) 
assert_size_stride(primals_6, (4, 16), (16, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 16), (16, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf10 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf10, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf9 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3, primals_5, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf5 triton_poi_fused_tanh_3[grid(64)](buf8, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 return buf7, buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor( buf3, (64, 16), (16, 1), 0 ), buf7, buf8, primals_8, primals_6, buf9, primals_4, buf10 class Connect2ModelNew(nn.Module): def __init__(self, board_size, action_size, device): super(Connect2ModelNew, self).__init__() self.device = device self.size = board_size self.action_size = action_size self.fc1 = nn.Linear(in_features=self.size, out_features=16) self.fc2 = nn.Linear(in_features=16, out_features=16) self.action_head = nn.Linear(in_features=16, out_features=self. action_size) self.value_head = nn.Linear(in_features=16, out_features=1) self def predict(self, board): board = torch.FloatTensor(board.astype(np.float32)) board = board.view(1, self.size) self.eval() with torch.no_grad(): pi, v = self.forward(board) return pi.data.cpu().numpy()[0], v.data.cpu().numpy()[0] def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.action_head.weight primals_7 = self.action_head.bias primals_8 = self.value_head.weight primals_9 = self.value_head.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
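A note on triton_poi_fused_relu_threshold_backward_0: it fuses the linear layer's bias add with ReLU and also materializes the boolean mask that ReLU's backward pass (threshold_backward) consumes. An eager per-element sketch, with assumed names:

import torch

def relu_with_backward_mask(x, bias):
    y = torch.relu(x + bias)   # forward: bias add + ReLU (done in place by the kernel)
    return y, y <= 0.0         # mask of positions whose gradient will be zeroed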
ShokuninSan/AlphaZeroSimple
Connect2Model
false
1,074
[ "MIT" ]
0
e32e6a28f872a046705a3f68882139688d5a43c3
https://github.com/ShokuninSan/AlphaZeroSimple/tree/e32e6a28f872a046705a3f68882139688d5a43c3
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, board_size, action_size, device): super().__init__() self.device = device self.size = board_size self.action_size = action_size self.fc1 = nn.Linear(in_features=self.size, out_features=16) self.fc2 = nn.Linear(in_features=16, out_features=16) self.action_head = nn.Linear(in_features=16, out_features=self. action_size) self.value_head = nn.Linear(in_features=16, out_features=1) self def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) action_logits = self.action_head(x) value_logit = self.value_head(x) return F.softmax(action_logits, dim=1), torch.tanh(value_logit) def predict(self, board): board = torch.FloatTensor(board.astype(np.float32)) board = board.view(1, self.size) self.eval() with torch.no_grad(): pi, v = self.forward(board) return pi.data.cpu().numpy()[0], v.data.cpu().numpy()[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 0]
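One caveat on the toy shapes: with the 4x4x4x4 input from get_inputs(), F.softmax(action_logits, dim=1) normalizes over the second axis, which is exactly the stride-16, four-entry reduction the fused kernels index as x0 + 64 * x2. A quick check:

import torch
probs = torch.softmax(torch.randn(4, 4, 4, 4), dim=1)
assert torch.allclose(probs.sum(dim=1), torch.ones(4, 4, 4))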
CausalConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/2a/c2aucoxj5ek5nl3pl5n67fq4vdl55x2rw66tvv2aq23fsb3jvf5q.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # out => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [3, 0, 3, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 7) % 7 x0 = xindex % 7 x2 = (xindex // 49) x4 = xindex tmp0 = (-3) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-3) + 
x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-15) + x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6r/c6rjd3rxthhw2ub6d2gtgjazrjeoyhalmj36ujwzgwexsis27e73.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) triton_per_fused__weight_norm_interface_1 = async_compile.triton('triton_per_fused__weight_norm_interface_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp9, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_6/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %mul, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] triton_per_fused__weight_norm_interface_1.run(buf2, primals_3, primals_2, buf3, 4, 64, grid=grid(4), stream=stream0) # Topologically Sorted Source 
Nodes: [out_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf5, buf3, primals_2, primals_3, buf0, buf2, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data class WNConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out class CausalConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding='downright', activation=None): super().__init__() if isinstance(kernel_size, int): kernel_size = [kernel_size] * 2 self.kernel_size = kernel_size if padding == 'downright': pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0] elif padding == 'down' or padding == 'causal': pad = kernel_size[1] // 2 pad = [pad, pad, kernel_size[0] - 1, 0] self.causal = 0 if padding == 'causal': self.causal = kernel_size[1] // 2 self.pad = nn.ZeroPad2d(pad) self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride= stride, padding=0, activation=activation) def forward(self, input): out = self.pad(input) if self.causal > 0: self.conv.conv.weight_v.data[:, :, -1, self.causal:].zero_() out = self.conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 784 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 7 % 7 x0 = xindex % 7 x2 = xindex // 49 x4 = xindex tmp0 = -3 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -3 + x0 tmp4 = tmp3 >= tmp1 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-15 + x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_per_fused__weight_norm_interface_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf2 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused__weight_norm_interface_1[grid(4)](buf2, primals_3, primals_2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = extern_kernels.convolution(buf0, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(256)](buf5, primals_4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 
return buf5, buf3, primals_2, primals_3, buf0, buf2, buf3 class WNConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out class CausalConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding='downright', activation=None): super().__init__() if isinstance(kernel_size, int): kernel_size = [kernel_size] * 2 self.kernel_size = kernel_size if padding == 'downright': pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0] elif padding == 'down' or padding == 'causal': pad = kernel_size[1] // 2 pad = [pad, pad, kernel_size[0] - 1, 0] self.causal = 0 if padding == 'causal': self.causal = kernel_size[1] // 2 self.pad = nn.ZeroPad2d(pad) self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride= stride, padding=0, activation=activation) def forward(self, input_0): primals_4 = self.conv.conv.bias primals_2 = self.conv.conv.weight_g primals_3 = self.conv.conv.weight_v primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Shivanshu-Gupta/KaoKore-VQ-VAE2
CausalConv2d
false
1,075
[ "MIT" ]
0
38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
https://github.com/Shivanshu-Gupta/KaoKore-VQ-VAE2/tree/38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
import torch from torch import nn import torch.utils.data class WNConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out class Model(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding='downright', activation=None): super().__init__() if isinstance(kernel_size, int): kernel_size = [kernel_size] * 2 self.kernel_size = kernel_size if padding == 'downright': pad = [kernel_size[1] - 1, 0, kernel_size[0] - 1, 0] elif padding == 'down' or padding == 'causal': pad = kernel_size[1] // 2 pad = [pad, pad, kernel_size[0] - 1, 0] self.causal = 0 if padding == 'causal': self.causal = kernel_size[1] // 2 self.pad = nn.ZeroPad2d(pad) self.conv = WNConv2d(in_channel, out_channel, kernel_size, stride= stride, padding=0, activation=activation) def forward(self, input): out = self.pad(input) if self.causal > 0: self.conv.conv.weight_v.data[:, :, -1, self.causal:].zero_() out = self.conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
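Note: a minimal numerical sanity check for this record, assuming a CUDA device with Triton available and both CausalConv2d (from the PyTorch source above) and CausalConv2dNew (from the compiled wrapper) in scope; the tolerances are illustrative, not from the source. Per the call() graph, primals_1 is the padded input and primals_3 the weight_v parameter, so the wrapper's forward must bind input_0 to primals_1 for this check to hold.

import torch

eager = CausalConv2d(4, 4, 4).cuda()
compiled = CausalConv2dNew(4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # align the randomly initialized weight_g/weight_v/bias
x = torch.rand(4, 4, 4, 4, device='cuda')  # same shape as get_inputs()
with torch.no_grad():
    # the compiled wrapper should reproduce the eager pad -> weight_norm -> conv pipeline
    torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)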
FastGRNNCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/w4/cw4ae3o32nppo7llvm7ul7p5le255pvoa5wqcrceacbu2xvt2ydy.py # Topologically Sorted Source Nodes: [pre_comp, add_1, z, add_2, c, mul, sigmoid_1, sub, mul_1, sigmoid_2, add_3, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # c => tanh # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # new_h => add_4 # pre_comp => add # sigmoid_1 => sigmoid_1 # sigmoid_2 => sigmoid_2 # sub => sub # z => sigmoid # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_6), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_7,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {}) # %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_8,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %sigmoid_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %tanh), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x0), xmask) tmp8 = tl.load(in_ptr3 + (0)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr4 + (0)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp18 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp10 = tl.sigmoid(tmp9) tmp11 = 1.0 tmp12 = tmp11 - tmp5 tmp13 = tmp10 * tmp12 tmp16 = tl.sigmoid(tmp15) tmp17 = tmp13 + tmp16 tmp19 = tmp2 + tmp18 tmp20 = libdevice.tanh(tmp19) tmp21 = tmp17 * tmp20 tmp22 = tmp7 + tmp21 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) assert_size_stride(primals_8, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [uComp], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pre_comp, add_1, z, add_2, c, mul, sigmoid_1, sub, mul_1, sigmoid_2, add_3, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf2, buf1, primals_5, primals_4, primals_7, primals_8, primals_6, buf3, 256, grid=grid(256), stream=stream0) del buf1 return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] =
utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastGRNNCell(RNNCell): """ FastGRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. zetaInit = init for zeta, the scale param nuInit = init for nu, the translation param FastGRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(Wx_t + Uh_{t-1} + B_g) h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'): super(FastGRNNCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank, wSparsity, uSparsity) self._zetaInit = zetaInit self._nuInit = nuInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1])) self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastGRNN' def forward(self, input, state): if self._wRank is None: wComp = torch.matmul(input, self.W) else: wComp = torch.matmul(torch.matmul(input, self.W1), self.W2) if self._uRank is None: uComp = torch.matmul(state, self.U) else: uComp = torch.matmul(torch.matmul(state, self.U1), self.U2) pre_comp = wComp + uComp z = gen_nonlinearity(pre_comp + self.bias_gate, self._gate_nonlinearity ) c = gen_nonlinearity(pre_comp + self.bias_update, self. _update_nonlinearity) new_h = z * state + (torch.sigmoid(self.zeta) * (1.0 - z) + torch. 
sigmoid(self.nu)) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) Vars.extend([self.zeta, self.nu]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x0, xmask) tmp8 = tl.load(in_ptr3 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr4 + 0) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp10 = tl.sigmoid(tmp9) tmp11 = 1.0 tmp12 = tmp11 - tmp5 tmp13 = tmp10 * tmp12 tmp16 = tl.sigmoid(tmp15) tmp17 = tmp13 + tmp16 tmp19 = tmp2 + tmp18 tmp20 = libdevice.tanh(tmp19) tmp21 = tmp17 * tmp20 tmp22 = tmp7 + tmp21 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) assert_size_stride(primals_8, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf2, buf1, primals_5, primals_4, primals_7, primals_8, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastGRNNCellNew(RNNCell): """ FastGRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn 
update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. zetaInit = init for zeta, the scale param nuInit = init for nu, the translation param FastGRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(Wx_t + Uh_{t-1} + B_g) h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'): super(FastGRNNCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank, wSparsity, uSparsity) self._zetaInit = zetaInit self._nuInit = nuInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1])) self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastGRNN' def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) Vars.extend([self.zeta, self.nu]) return Vars def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.U primals_5 = self.bias_gate primals_6 = self.bias_update primals_7 = self.zeta primals_8 = self.nu primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
ShishirPatil/EdgeML-1
FastGRNNCell
false
1,076
[ "MIT" ]
0
cbba9f8b989e545788427c004eb8450e7e4c1a21
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super().__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(to # ... truncated (>4000 chars) for memory efficiency
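Note: the fused kernel in this record evaluates the FastGRNN update from the docstring, computing the shared pre-activation Wx_t + Uh_{t-1} once and reusing it for both the gate and the candidate. A minimal eager sketch of that update, assuming a CUDA device and the full-rank FastGRNNCell defined above (shapes mirror get_inputs(); not part of the source):

import torch

cell = FastGRNNCell(4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # input
h = torch.rand(4, 4, 4, 4, device='cuda')  # previous state
with torch.no_grad():
    pre = x @ cell.W + h @ cell.U  # shared pre-activation, computed once
    z = torch.sigmoid(pre + cell.bias_gate)  # gate
    c = torch.tanh(pre + cell.bias_update)  # candidate state
    new_h = z * h + (torch.sigmoid(cell.zeta) * (1.0 - z) + torch.sigmoid(cell.nu)) * c
    torch.testing.assert_close(new_h, cell(x, h))  # same ops as the eager forward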
ResidualConvUnit
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu] # Source node to ATen node mapping: # out => relu # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # out_1 => convolution # out_2 => relu_1 # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hc/chcx47xohlbah6m2k2mpiyct2oijgy7vuqj7z6iime4huasayvby.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %relu), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %relu), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf4, buf0, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 return (buf4, primals_2, primals_4, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ResidualConvUnit(nn.Module): """Residual convolution module. """ def __init__(self, features): """Init. Args: features (int): number of features """ super().__init__() self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, x): """Forward pass. Args: x (tensor): input Returns: tensor: output """ out = self.relu(x) out = self.conv1(out) out = self.relu(out) out = self.conv2(out) return out + x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_add_2[grid(256)](buf4, buf0, primals_1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
    return buf4, primals_2, primals_4, buf0, buf2


class ResidualConvUnitNew(nn.Module):
    """Residual convolution module.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()
        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1,
            padding=1, bias=True)
        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1,
            padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input_0):
        primals_2 = self.conv1.weight
        primals_3 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
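A hedged parity sketch between the eager module and the generated wrapper above (illustrative; assumes a CUDA device with Triton available, since call() hard-codes cuda:0 and the (4, 4, 4, 4) input shape; the tolerance is an assumption).

# Hypothetical check that ResidualConvUnitNew matches ResidualConvUnit.
torch.manual_seed(0)
eager = ResidualConvUnit(4).cuda()
fused = ResidualConvUnitNew(4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
# Pass clones: the eager path applies ReLU in place on its input, and the
# fused call() likewise writes back into its input buffer.
assert torch.allclose(eager(x.clone()), fused(x.clone()), atol=1e-5)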
ShiraLightricks/3d-photo-inpainting
ResidualConvUnit
false
1077
[ "MIT" ]
0
c42ac41576690b765e50f5281ddbfb58439ff36d
https://github.com/ShiraLightricks/3d-photo-inpainting/tree/c42ac41576690b765e50f5281ddbfb58439ff36d
import torch
import torch.nn as nn


class Model(nn.Module):
    """Residual convolution module.
    """

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()
        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1,
            padding=1, bias=True)
        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1,
            padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """
        out = self.relu(x)
        out = self.conv1(out)
        out = self.relu(out)
        out = self.conv2(out)
        return out + x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
LayerNorm
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/zf/czfnaeipqg4a3qzttb2l6zy5ng44vshk3lfmp25jc2er665hxsmw.py
# Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
#   mean => mean
#   sub => sub
# Graph fragment:
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/ix/cix3agfx5ttdzebqllvt7xsyf7hguiv4c5ya7rpk6x57inkbm4xh.py
# Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   mul_1 => mul_1
#   pow_1 => pow_1
#   rsqrt => rsqrt
#   variance => mean_1
#   x => mul
#   x_1 => add_1
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 0.0001), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %view_1), kwargs = {})
triton_poi_fused_add_mean_mul_pow_rsqrt_1 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 0.0001
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tmp19 = tmp17 * tmp18
    tmp21 = tmp19 + tmp20
    tl.store(out_ptr0 + (x3), tmp21, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
        triton_poi_fused_add_mean_mul_pow_rsqrt_1.run(buf0, primals_2, primals_3, buf1, 256, grid=grid(256), stream=stream0)
        del buf0
        del primals_2
        del primals_3
    return (buf1, primals_1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np


class BaseModule(torch.nn.Module):

    def __init__(self):
        super(BaseModule, self).__init__()

    @property
    def nparams(self):
        """
        Returns number of trainable parameters of the module.
        """
        num_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                num_params += np.prod(param.detach().cpu().numpy().shape)
        return num_params

    def relocate_input(self, x: 'list'):
        """
        Relocates provided tensors to the same device set for the module.
        """
        device = next(self.parameters()).device
        for i in range(len(x)):
            if isinstance(x[i], torch.Tensor) and x[i].device != device:
                x[i] = x[i].to(device)  # move the tensor to the module's device
        return x


class LayerNorm(BaseModule):

    def __init__(self, channels, eps=0.0001):
        super(LayerNorm, self).__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = torch.nn.Parameter(torch.ones(channels))
        self.beta = torch.nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        n_dims = len(x.shape)
        mean = torch.mean(x, 1, keepdim=True)
        variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
        x = (x - mean) * torch.rsqrt(variance + self.eps)
        shape = [1, -1] + [1] * (n_dims - 2)
        x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4}]
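A small numerical cross-check of the module above (illustrative, CPU-only). Unlike torch.nn.LayerNorm, which normalizes over trailing dimensions, this LayerNorm normalizes over dim=1 (the channel dimension) with a biased variance; the sketch recomputes that formula by hand.

torch.manual_seed(0)
ln = LayerNorm(channels=4)
x, = get_inputs()
mean = x.mean(1, keepdim=True)
var = ((x - mean) ** 2).mean(1, keepdim=True)
# At init gamma=1 and beta=0, so the output is exactly the normalized tensor.
expected = (x - mean) * torch.rsqrt(var + 1e-4)
assert torch.allclose(ln(x), expected)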
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 0.0001
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tmp19 = tmp17 * tmp18
    tmp21 = tmp19 + tmp20
    tl.store(out_ptr0 + x3, tmp21, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0,
            primals_2, primals_3, buf1, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf0
        del primals_2
        del primals_3
    return buf1, primals_1


class BaseModule(torch.nn.Module):

    def __init__(self):
        super(BaseModule, self).__init__()

    @property
    def nparams(self):
        """
        Returns number of trainable parameters of the module.
        """
        num_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                num_params += np.prod(param.detach().cpu().numpy().shape)
        return num_params

    def relocate_input(self, x: 'list'):
        """
        Relocates provided tensors to the same device set for the module.
        """
        device = next(self.parameters()).device
        for i in range(len(x)):
            if isinstance(x[i], torch.Tensor) and x[i].device != device:
                x[i] = x[i].to(device)  # move the tensor to the module's device
        return x


class LayerNormNew(BaseModule):

    def __init__(self, channels, eps=0.0001):
        super(LayerNormNew, self).__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = torch.nn.Parameter(torch.ones(channels))
        self.beta = torch.nn.Parameter(torch.zeros(channels))

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Sobsz/uberduck-ml-dev
LayerNorm
false
1078
[ "Apache-2.0" ]
0
f099238f6f2e3f600d72d89dea3c883c59d91387
https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387
import torch
import numpy as np


class BaseModule(torch.nn.Module):

    def __init__(self):
        super().__init__()

    @property
    def nparams(self):
        """
        Returns number of trainable parameters of the module.
        """
        num_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                num_params += np.prod(param.detach().cpu().numpy().shape)
        return num_params

    def relocate_input(self, x: 'list'):
        """
        Relocates provided tensors to the same device set for the module.
        """
        device = next(self.parameters()).device
        for i in range(len(x)):
            if isinstance(x[i], torch.Tensor) and x[i].device != device:
                x[i] = x[i].to(device)  # move the tensor to the module's device
        return x


class Model(BaseModule):

    def __init__(self, channels, eps=0.0001):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = torch.nn.Parameter(torch.ones(channels))
        self.beta = torch.nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        n_dims = len(x.shape)
        mean = torch.mean(x, 1, keepdim=True)
        variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
        x = (x - mean) * torch.rsqrt(variance + self.eps)
        shape = [1, -1] + [1] * (n_dims - 2)
        x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
Loss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/rg/crgspi4c5nlqn7h3ahzly6mzubmsww42rdge5ypcptnsedezem2z.py
# Topologically Sorted Source Nodes: [add, log, mul], Original ATen: [aten.add, aten.log, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   log => log
#   mul => mul
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 4), kwargs = {})
triton_poi_fused_add_log_mul_0 = async_compile.triton('triton_poi_fused_add_log_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_log_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_log_mul_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = 4.0
    tmp5 = tmp0 * tmp4
    tl.store(out_ptr0 + (x0), tmp3, xmask)
    tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/li/clivgwtftzyd2kjyxscelobmyhmudzefuhzuzxrxyxyfbvqhgazb.py
# Topologically Sorted Source Nodes: [sub, mean, loss, neg], Original ATen: [aten.sub, aten.mean, aten.mul, aten.neg]
# Source node to ATen node mapping:
#   loss => mul_1
#   mean => mean
#   neg => neg
#   sub => sub
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %view_5), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.25), kwargs = {})
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mul_1,), kwargs = {})
triton_per_fused_mean_mul_neg_sub_1 = async_compile.triton('triton_per_fused_mean_mul_neg_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_neg_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = 256.0
    tmp7 = tmp5 / tmp6
    tmp8 = 0.25
    tmp9 = tmp7 * tmp8
    tmp10 = -tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, log, mul], Original ATen: [aten.add, aten.log, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_log_mul_0.run(arg0_1, buf0, buf2, 256, grid=grid(256), stream=stream0)
        del arg0_1
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), out=buf1)
        del arg1_1
        buf3 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
        extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
        del arg2_1
        del buf2
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [sub, mean, loss, neg], Original ATen: [aten.sub, aten.mean, aten.mul, aten.neg]
        triton_per_fused_mean_mul_neg_sub_1.run(buf5, buf1, buf3, 1, 256, grid=grid(1), stream=stream0)
        del buf1
        del buf3
    return (buf5, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1, arg2_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch as t
import torch.nn as nn


def indicator(K):
    """
    @K: number of users
    """
    return t.eye(5 * K)


class Loss(nn.Module):

    def __init__(self, K, Nt, Vartheta):
        super(Loss, self).__init__()
        self.K = K
        self.Nt = Nt
        self.Delta = indicator(self.K)
        self.alpha = 1 / self.K
        self.Vartheta = Vartheta
        self.batchsize = 10

    def forward(self, x, ind1, ind2):
        """
        @x: output of the last layer, its dimension is (batchsize, 2*K*K+3*K)
        """
        loss = self.alpha * t.mean(t.matmul(t.log(1 + x), ind1) - t.matmul(
            self.Vartheta * x, ind2))
        return -loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'K': 4, 'Nt': 4, 'Vartheta': 4}]
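A quick cross-check of the loss formula on the row's sample inputs (illustrative, CPU-only): it recomputes the closed form -alpha * mean(log(1+x) @ ind1 - Vartheta*x @ ind2) directly and compares.

criterion = Loss(K=4, Nt=4, Vartheta=4)
x, ind1, ind2 = get_inputs()
loss = criterion(x, ind1, ind2)
# alpha = 1/K = 0.25 and Vartheta = 4 for this configuration.
expected = -(1 / 4) * (torch.log(1 + x) @ ind1 - 4 * x @ ind2).mean()
assert torch.allclose(loss, expected)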
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch as t
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_log_mul_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = 4.0
    tmp5 = tmp0 * tmp4
    tl.store(out_ptr0 + x0, tmp3, xmask)
    tl.store(out_ptr1 + x0, tmp5, xmask)


@triton.jit
def triton_per_fused_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = 256.0
    tmp7 = tmp5 / tmp6
    tmp8 = 0.25
    tmp9 = tmp7 * tmp8
    tmp10 = -tmp9
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_log_mul_0[grid(256)](arg0_1, buf0, buf2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0),
            out=buf1)
        del arg1_1
        buf3 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
        del buf0
        extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0),
            out=buf3)
        del arg2_1
        del buf2
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused_mean_mul_neg_sub_1[grid(1)](buf5, buf1, buf3, 1,
            256, num_warps=2, num_stages=1)
        del buf1
        del buf3
    return buf5,


def indicator(K):
    """
    @K: number of users
    """
    return t.eye(5 * K)


class LossNew(nn.Module):

    def __init__(self, K, Nt, Vartheta):
        super(LossNew, self).__init__()
        self.K = K
        self.Nt = Nt
        self.Delta = indicator(self.K)
        self.alpha = 1 / self.K
        self.Vartheta = Vartheta
        self.batchsize = 10

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
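One caveat worth noting about the generated wrapper above (a hedged observation from reading the kernels, not stated by the dataset): the literals 4.0 in triton_poi_fused_add_log_mul_0 and 0.25 in triton_per_fused_mean_mul_neg_sub_1 are Vartheta and alpha specialized at trace time for K=4, Vartheta=4, so LossNew's forward silently ignores any other constructor values. A small sketch (requires CUDA) makes the divergence visible:

# Illustrative only: the fused path still scales by 4.0 and 0.25.
eager = Loss(K=4, Nt=4, Vartheta=8)
fused = LossNew(K=4, Nt=4, Vartheta=8)
x, ind1, ind2 = [a.cuda() for a in get_inputs()]
print(eager(x, ind1, ind2).item())  # uses Vartheta=8
print(fused(x, ind1, ind2).item())  # computed with the baked-in Vartheta=4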
SoulVen/USRMNet-HWGCN
Loss
false
1079
[ "Apache-2.0" ]
0
2f99f53150335be26270bd408ce59dc51c8435cc
https://github.com/SoulVen/USRMNet-HWGCN/tree/2f99f53150335be26270bd408ce59dc51c8435cc
import torch
import torch as t
import torch.nn as nn


def indicator(K):
    """
    @K: number of users
    """
    return t.eye(5 * K)


class Model(nn.Module):

    def __init__(self, K, Nt, Vartheta):
        super().__init__()
        self.K = K
        self.Nt = Nt
        self.Delta = indicator(self.K)
        self.alpha = 1 / self.K
        self.Vartheta = Vartheta
        self.batchsize = 10

    def forward(self, x, ind1, ind2):
        """
        @x: output of the last layer, its dimension is (batchsize, 2*K*K+3*K)
        """
        loss = self.alpha * t.mean(t.matmul(t.log(1 + x), ind1) - t.matmul(
            self.Vartheta * x, ind2))
        return -loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 4]
AttNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/dq/cdq267yqqozcarovnsrpdv2jgticcu6trrauhb2nqucmjjuwswn5.py
# Topologically Sorted Source Nodes: [concat_feat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   concat_feat => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2],), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 64)
    x0 = xindex % 64
    x2 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (64*x1)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 4, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x0 + (64*((-2) + x1))), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/d3/cd3wh33oi23pfdaubjmlj5web62n47mpc3cvroacr4ja7m4eyxes.py
# Topologically Sorted Source Nodes: [conv2d, weights], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d => convolution
#   weights => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 64
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/k3/ck3ww4lbm3ccb4nqtjzcx44tla623nm2tpo6tnos4pcpcrtdkfly.py
# Topologically Sorted Source Nodes: [conv2d_1, weights_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   conv2d_1 => convolution_1
#   weights_1 => relu_1
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 16
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/7d/c7dn7w5djo7urmcn4tiv47ezted4wgg5pnolpuxzmc3glaqrtwgn.py
# Topologically Sorted Source Nodes: [conv2d_2, weights_2], Original ATen: [aten.convolution, aten._softmax]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
#   weights_2 => amax, exp, sub, sum_1
# Graph fragment:
#   %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_7, %primals_8, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_2, [0], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_2, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
triton_poi_fused__softmax_convolution_3 = async_compile.triton('triton_poi_fused__softmax_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_convolution_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr0 + (16 + x0), xmask)
    tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
    tmp10 = tl.load(in_ptr0 + (48 + x0), xmask)
    tmp3 = tmp0 + tmp2
    tmp5 = tmp4 + tmp2
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp8 = tmp7 + tmp2
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp10 + tmp2
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp13 = tmp3 - tmp12
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp5 - tmp12
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp14 + tmp16
    tmp18 = tmp8 - tmp12
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp11 - tmp12
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tl.store(out_ptr0 + (x0), tmp12, xmask)
    tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/vl/cvl4cyhfl76tcxqwtza2w75eqxw7wmlfnbja3yuh3nzvv64atxf4.py
# Topologically Sorted Source Nodes: [conv2d_2, weights_2], Original ATen: [aten.convolution, aten._softmax]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
#   weights_2 => amax, div, exp, sub, sum_1
# Graph fragment:
#   %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_7, %primals_8, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_2, [0], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_2, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_convolution_4 = async_compile.triton('triton_poi_fused__softmax_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 / tmp7
    tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/uy/cuycvtk2spkqnzbhucr6bdz4m47aquwkozh5wxorhrxoikbox2fk.py
# Topologically Sorted Source Nodes: [weight1, mul, mul_1, out_feat], Original ATen: [aten.repeat, aten.mul, aten.add]
# Source node to ATen node mapping:
#   mul => mul
#   mul_1 => mul_1
#   out_feat => add
#   weight1 => repeat
# Graph fragment:
#   %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%getitem, [1, 4, 1, 1]), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat, %primals_1), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat, %primals_2), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_repeat_5 = async_compile.triton('triton_poi_fused_add_mul_repeat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[128],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_repeat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_repeat_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = (xindex // 64)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x3), xmask)
    tmp3 = tl.load(in_ptr2 + (x3), xmask)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp0 * tmp3
    tmp5 = tmp2 + tmp4
    tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
    args.clear()
    assert_size_stride(primals_1, (2, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (64, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (64, ), (1, ))
    assert_size_stride(primals_5, (16, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_6, (16, ), (1, ))
    assert_size_stride(primals_7, (1, 16, 1, 1), (16, 1, 1, 1))
    assert_size_stride(primals_8, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [concat_feat], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [conv2d, weights], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_1.run(buf2, primals_4, 4096, grid=grid(4096), stream=stream0)
        del primals_4
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
        buf4 = buf3; del buf3  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1, weights_1], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_2.run(buf4, primals_6, 1024, grid=grid(1024), stream=stream0)
        del primals_6
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
        buf6 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d_2, weights_2], Original ATen: [aten.convolution, aten._softmax]
        triton_poi_fused__softmax_convolution_3.run(buf5, primals_8, buf6, buf7, 16, grid=grid(16), stream=stream0)
        buf8 = buf5; del buf5  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2, weights_2], Original ATen: [aten.convolution, aten._softmax]
        triton_poi_fused__softmax_convolution_4.run(buf8, primals_8, buf6, buf7, 64, grid=grid(64), stream=stream0)
        del buf6
        del buf7
        del primals_8
        buf9 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [weight1, mul, mul_1, out_feat], Original ATen: [aten.repeat, aten.mul, aten.add]
        triton_poi_fused_add_mul_repeat_5.run(buf8, primals_1, primals_2, buf9, 128, grid=grid(128), stream=stream0)
    return (buf9, primals_1, primals_2, primals_3, primals_5, primals_7, buf0, buf2, buf4, buf8, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((2, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((2, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((16, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((1, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class AttNet(nn.Module):

    def __init__(self, num_input_ch):
        super(AttNet, self).__init__()
        self.num_input_ch = num_input_ch
        self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(64, 16, 1, bias=True)
        self.conv3 = nn.Conv2d(16, 1, 1, bias=True)

    def forward(self, warp_feat, conv_feat):
        concat_feat = torch.cat([warp_feat, conv_feat], dim=0)
        weights = F.relu(self.conv1(concat_feat))
        weights = F.relu(self.conv2(weights))
        weights = F.softmax(self.conv3(weights), dim=0)
        weights = torch.split(weights, 2, dim=0)
        weight1 = torch.tile(weights[0], (1, self.num_input_ch, 1, 1))
        # NOTE: weight2 also reuses weights[0]; weights[1] is never used,
        # and the compiled kernels in the next cell mirror this behaviour.
        weight2 = torch.tile(weights[0], (1, self.num_input_ch, 1, 1))
        out_feat = weight1 * warp_feat + weight2 * conv_feat
        return out_feat


def get_inputs():
    return [torch.rand([2, 4, 4, 4]), torch.rand([2, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_input_ch': 4}]
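A shape-and-normalization sketch for the module above (illustrative, CPU-only): the softmax is taken over dim=0, so across the four stacked maps the per-pixel weights sum to 1, and the output keeps the (2, 4, 4, 4) feature shape.

torch.manual_seed(0)
net = AttNet(num_input_ch=4)
warp_feat, conv_feat = get_inputs()
with torch.no_grad():
    out = net(warp_feat, conv_feat)
    assert out.shape == (2, 4, 4, 4)
    # Reproduce the weight computation to confirm the dim=0 softmax
    # normalizes across the four concatenated samples at every location.
    concat = torch.cat([warp_feat, conv_feat], dim=0)  # (4, 4, 4, 4)
    w = F.softmax(net.conv3(F.relu(net.conv2(F.relu(net.conv1(concat))))), dim=0)
    assert torch.allclose(w.sum(dim=0), torch.ones(1, 4, 4))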
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 64
    x0 = xindex % 64
    x2 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 2, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-2 + x1)), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_convolution_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr0 + (16 + x0), xmask)
    tmp7 = tl.load(in_ptr0 + (32 + x0), xmask)
    tmp10 = tl.load(in_ptr0 + (48 + x0), xmask)
    tmp3 = tmp0 + tmp2
    tmp5 = tmp4 + tmp2
    tmp6 = triton_helpers.maximum(tmp3, tmp5)
    tmp8 = tmp7 + tmp2
    tmp9 = triton_helpers.maximum(tmp6, tmp8)
    tmp11 = tmp10 + tmp2
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp13 = tmp3 - tmp12
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp5 - tmp12
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp14 + tmp16
    tmp18 = tmp8 - tmp12
    tmp19 = tl_math.exp(tmp18)
    tmp20 = tmp17 + tmp19
    tmp21 = tmp11 - tmp12
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tl.store(out_ptr0 + x0, tmp12, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused__softmax_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp8 = tmp6 / tmp7
    tl.store(in_out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_add_mul_repeat_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp3 = tl.load(in_ptr2 + x3, xmask)
    tmp2 = tmp0 * tmp1
    tmp4 = tmp0 * tmp3
    tmp5 = tmp2 + tmp4
    tl.store(out_ptr0 + x3, tmp5, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (2, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (64, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_4, (64,), (1,))
    assert_size_stride(primals_5, (16, 64, 1, 1), (64, 1, 1, 1))
    assert_size_stride(primals_6, (16,), (1,))
    assert_size_stride(primals_7, (1, 16, 1, 1), (16, 1, 1, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(4096)](buf2, primals_4,
            4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_4
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_relu_2[grid(1024)](buf4, primals_6,
            1024, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
        buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
        buf6 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_convolution_3[grid(16)](buf5, primals_8,
            buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = buf5
        del buf5
        triton_poi_fused__softmax_convolution_4[grid(64)](buf8, primals_8,
            buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf6
        del buf7
        del primals_8
        buf9 = empty_strided_cuda((2, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_repeat_5[grid(128)](buf8, primals_1,
            primals_2, buf9, 128, XBLOCK=128, num_warps=4, num_stages=1)
    return (buf9, primals_1, primals_2, primals_3, primals_5, primals_7,
        buf0, buf2, buf4, buf8)


class AttNetNew(nn.Module):

    def __init__(self, num_input_ch):
        super(AttNetNew, self).__init__()
        self.num_input_ch = num_input_ch
        self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(64, 16, 1, bias=True)
        self.conv3 = nn.Conv2d(16, 1, 1, bias=True)

    def forward(self, input_0, input_1):
        primals_3 = self.conv1.weight
        primals_4 = self.conv1.bias
        primals_5 = self.conv2.weight
        primals_6 = self.conv2.bias
        primals_7 = self.conv3.weight
        primals_8 = self.conv3.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
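A hedged parity sketch for the module above, assuming a CUDA device is available: run the eager AttNet and the generated AttNetNew with shared weights and identical inputs, and check that the fused kernels reproduce the eager output. Both class names come from this record; nothing else is assumed beyond stock PyTorch.

# Parity check between the eager and Inductor-compiled modules (needs CUDA).
import torch

eager = AttNet(num_input_ch=4).cuda()
compiled = AttNetNew(num_input_ch=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the random init

warp = torch.rand(2, 4, 4, 4, device='cuda')
conv = torch.rand(2, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(warp, conv), eager(warp, conv))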
SionHu/LP-MOT
AttNet
false
1,080
[ "MIT" ]
0
90e6a1d51ebe1a948ac5c018a5ee560654e824f1
https://github.com/SionHu/LP-MOT/tree/90e6a1d51ebe1a948ac5c018a5ee560654e824f1
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, num_input_ch):
        super().__init__()
        self.num_input_ch = num_input_ch
        self.conv1 = nn.Conv2d(self.num_input_ch, 64, 3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(64, 16, 1, bias=True)
        self.conv3 = nn.Conv2d(16, 1, 1, bias=True)

    def forward(self, warp_feat, conv_feat):
        concat_feat = torch.cat([warp_feat, conv_feat], dim=0)
        weights = F.relu(self.conv1(concat_feat))
        weights = F.relu(self.conv2(weights))
        weights = F.softmax(self.conv3(weights), dim=0)
        weights = torch.split(weights, 2, dim=0)
        weight1 = torch.tile(weights[0], (1, self.num_input_ch, 1, 1))
        # weights[0] is reused here in the original source; see the note in
        # the AttNet definition above.
        weight2 = torch.tile(weights[0], (1, self.num_input_ch, 1, 1))
        out_feat = weight1 * warp_feat + weight2 * conv_feat
        return out_feat


def get_inputs():
    return [torch.rand([2, 4, 4, 4]), torch.rand([2, 4, 4, 4])]


def get_init_inputs():
    return [4]
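One detail of this record worth noting: the fused add_mul_repeat kernel never materialises the tiled weight; it broadcast-loads one weight per (batch, h, w) position and reuses it across channels. In eager PyTorch the same shortcut is plain broadcasting, as the sketch below verifies; all names here are local to the sketch.

# Sketch: torch.tile along the channel axis equals broadcasting, which is
# what the fused kernel exploits instead of building a (2, 4, 4, 4) copy.
import torch

w = torch.rand(2, 1, 4, 4)   # per-pixel weight, single channel
x = torch.rand(2, 4, 4, 4)   # feature map with 4 channels

tiled = torch.tile(w, (1, 4, 1, 1)) * x   # explicit copy, as in the eager code
broadcast = w * x                          # no copy; same values
assert torch.equal(tiled, broadcast)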
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/es/cesrwuwkeluuf67vbj367seoctnhxwytz3gxjpe3dmncvzr46phg.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mm], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (8*x1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/d2/cd2iuy4xijl6opmrfcyh5n4ktkq3vxdew7ukyggi4oe4nmubofvx.py # 
Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # out_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %mm_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 x1 = (xindex // 8) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4) # alias # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0) # alias # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, buf1, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8) # alias # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf3) buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0) # alias # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf2, buf4, 32, grid=grid(32), stream=stream0) return (buf5, primals_2, buf2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
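The call() above illustrates Inductor's aliased-cat optimisation: the concatenated buffer is allocated once and the matmul result lands in a reinterpret_tensor slice of it, so torch.cat never performs a separate copy. A hedged eager-mode sketch of the same idea, with stand-in names for the weight:

# Sketch of the aliased-cat trick: instead of computing fc(x) and then
# torch.cat((x, fc(x)), 1) (two buffers plus a copy), allocate the
# concatenated output once and fill its two column slices in place.
import torch

x = torch.rand(4, 4)
w = torch.rand(4, 4)        # stand-in for self.fc.weight

out = torch.empty(4, 8)
out[:, :4] = x              # the "cat" of x is just a strided store
out[:, 4:] = x @ w.t()      # Inductor goes further: mm writes here directly

assert torch.allclose(out, torch.cat((x, x @ w.t()), 1))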
import torch
import torch.nn as nn


class FcCat(nn.Module):

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        out = torch.cat((x, self.fc(x)), 1)
        return out


class Net(nn.Module):

    def __init__(self, nFeatures, nHidden1, nHidden2):
        super(Net, self).__init__()
        self.l1 = FcCat(nFeatures, nHidden1)
        self.l2 = FcCat(nFeatures + nHidden1, nHidden2)

    def forward(self, x):
        out = self.l1(x)
        out = self.l2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'nFeatures': 4, 'nHidden1': 4, 'nHidden2': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 8 * x1), tmp0, xmask)


@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 8
    x1 = xindex // 8
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        buf0 = reinterpret_tensor(buf2, (4, 4), (8, 1), 4)
        extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf2, (4, 4), (8, 1), 0)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(16)](primals_2, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
        buf3 = reinterpret_tensor(buf5, (4, 4), (12, 1), 8)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (8, 4), (1,
            8), 0), out=buf3)
        buf4 = reinterpret_tensor(buf5, (4, 8), (12, 1), 0)
        triton_poi_fused_cat_1[grid(32)](buf2, buf4, 32, XBLOCK=32,
            num_warps=1, num_stages=1)
    return buf5, primals_2, buf2, primals_3


class FcCat(nn.Module):

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        out = torch.cat((x, self.fc(x)), 1)
        return out


class NetNew(nn.Module):

    def __init__(self, nFeatures, nHidden1, nHidden2):
        super(NetNew, self).__init__()
        self.l1 = FcCat(nFeatures, nHidden1)
        self.l2 = FcCat(nFeatures + nHidden1, nHidden2)

    def forward(self, input_0):
        primals_1 = self.l1.fc.weight
        primals_3 = self.l2.fc.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
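For context, output of this shape is what torch.compile's Inductor backend emits for Net. A minimal usage sketch, assuming a CUDA device and an Inductor-enabled PyTorch build; setting TORCH_COMPILE_DEBUG=1 in the environment dumps the generated wrapper code for inspection.

# Sketch: compiling the eager Net is how kernels like the ones above are
# produced; the compiled module should match the eager one numerically.
import torch

net = Net(nFeatures=4, nHidden1=4, nHidden2=4).cuda()
compiled = torch.compile(net)
x = torch.rand(4, 4, device='cuda')
torch.testing.assert_close(compiled(x), net(x))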
Sreehari-S/Tiramisu_DigestPath
Net
false
1,081
[ "Apache-2.0" ]
0
a884ee911bc60ce997996e0ec2e6036600ffcffa
https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa
import torch
import torch.nn as nn


class FcCat(nn.Module):

    def __init__(self, nIn, nOut):
        super().__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        out = torch.cat((x, self.fc(x)), 1)
        return out


class Model(nn.Module):

    def __init__(self, nFeatures, nHidden1, nHidden2):
        super().__init__()
        self.l1 = FcCat(nFeatures, nHidden1)
        self.l2 = FcCat(nFeatures + nHidden1, nHidden2)

    def forward(self, x):
        out = self.l1(x)
        out = self.l2(out)
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4, 4]
DecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/fg/cfg742icmosiwp5ugziye26din5ueqx3v7ntptkkpyackudldrxs.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_8, 0), kwargs = {}) triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/g7/cg74ay746yb3ivb67q6pz3e2xibwamviqoisgmyzopmnr47hrosh.py # Topologically Sorted Source 
Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attention_score_1 => div # attention_score_2 => full_default, where # attention_score_3 => amax, exp, sub, sum_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_1 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp15 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = 
tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp10 = tmp1 * tmp9 tmp11 = tmp10 * tmp4 tmp12 = tl.where(tmp8, tmp6, tmp11) tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tmp16 * tmp4 tmp18 = tl.where(tmp14, tmp6, tmp17) tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tmp22 * tmp4 tmp24 = tl.where(tmp20, tmp6, tmp23) tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = tl_math.exp(tmp26) tmp28 = tmp12 - tmp25 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp18 - tmp25 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp24 - tmp25 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + (x2), tmp25, xmask) tl.store(out_ptr1 + (x2), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/na/cna5revtaqihb3t6bnp24kdg6eqircycsi7slr6uzmhkwpelq6x7.py # Topologically Sorted Source Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attention_score_1 => div # attention_score_2 => full_default, where # attention_score_3 => div_1, exp, sub # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tc/ctcugqu2nbqlxcf2thnspnnypxifbalbzmclmutd5vaxdes2oyyk.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ua/cuaquah4oaz43nhi25wixnpzlhvf2zfzdiezhmmwkuy5wfhtw6z4.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_11), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/56/c566ex7xddxc2fqpwqlmymdyd23nesbsyghxftm7cy73ebnuo3ke.py # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_35, %add_2), kwargs = {}) triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/7b/c7b2yb3hgvixcalmjh5bpsuibu3izis7v3a6ccjg3fsa77ovsm2d.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/z6/cz657lehnuecvtrckws4hxctt6heob44wmjdfvlblgm7yc4swnal.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_4, mul_5, rsqrt_1, sub_3, var_mean_1 # Graph fragment: # 
%var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_23), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_24), kwargs = {}) triton_poi_fused_native_layer_norm_7 = async_compile.triton('triton_poi_fused_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/23/c23j2vk753qpctrm5kblwdo7f2zh4pnjylmlrcdhyl7syfjonudr.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_37,), kwargs = {}) # %le : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_relu_threshold_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4, ), (1, )) 
assert_size_stride(primals_20, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4, ), (1, )) assert_size_stride(primals_23, (4, ), (1, )) assert_size_stride(primals_24, (4, ), (1, )) assert_size_stride(primals_25, (4, 4), (4, 1)) assert_size_stride(primals_26, (4, ), (1, )) assert_size_stride(primals_27, (4, 4), (4, 1)) assert_size_stride(primals_28, (4, ), (1, )) assert_size_stride(primals_29, (4, ), (1, )) assert_size_stride(primals_30, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_query], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_key], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] stream0 = get_raw_stream(0) triton_poi_fused_eq_0.run(primals_8, buf3, 256, grid=grid(256), stream=stream0) del primals_8 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_1.run(buf3, buf0, buf1, buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf3, buf0, buf1, buf4, buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf7) buf8 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_10 buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(buf8, primals_1, buf9, buf10, 16, grid=grid(16), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 
1), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(buf8, primals_1, buf9, buf10, primals_11, primals_12, buf11, 64, grid=grid(64), stream=stream0) del primals_12 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_query_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_15 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_key_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_17, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_16 del primals_17 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_value_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_19, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_18 del primals_19 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq_1], Original ATen: [aten.eq] triton_poi_fused_eq_0.run(primals_20, buf15, 256, grid=grid(256), stream=stream0) del primals_20 buf16 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [attention_score_2, attention_score_5, attention_score_6, attention_score_7], Original ATen: [aten.masked_fill, aten.div, aten._softmax] triton_poi_fused__softmax_div_masked_fill_1.run(buf15, buf12, buf13, buf16, buf17, 64, grid=grid(64), stream=stream0) buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_score_2, attention_score_5, attention_score_6, attention_score_7], Original ATen: [aten.masked_fill, aten.div, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf15, buf12, buf13, buf16, buf17, buf18, 256, grid=grid(256), stream=stream0) buf19 = reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 1), 0); del buf17 # reuse # Topologically Sorted Source Nodes: [result_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 1), 0), out=buf19) buf20 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf20) buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0); del buf20 # reuse # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf21, primals_22, buf11, 64, grid=grid(64), stream=stream0) del primals_22 buf22 = buf9; del buf9 # reuse buf23 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_6.run(buf21, buf22, buf23, 16, grid=grid(16), stream=stream0) buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] 
triton_poi_fused_native_layer_norm_7.run(buf21, buf22, buf23, primals_23, primals_24, buf24, 64, grid=grid(64), stream=stream0) del primals_24 buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf25) buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0); del buf25 # reuse buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_8.run(buf26, primals_26, buf32, 64, grid=grid(64), stream=stream0) del primals_26 buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf27) buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0); del buf27 # reuse # Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add] triton_poi_fused_add_5.run(buf28, primals_28, buf24, 64, grid=grid(64), stream=stream0) del primals_28 buf29 = buf23; del buf23 # reuse buf30 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_6.run(buf28, buf29, buf30, 16, grid=grid(16), stream=stream0) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_7.run(buf28, buf29, buf30, primals_29, primals_30, buf31, 64, grid=grid(64), stream=stream0) del buf29 del buf30 del primals_30 return (buf31, primals_1, primals_11, primals_23, primals_29, buf0, buf1, buf3, buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), buf13, buf15, buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf21, reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(buf26, (16, 4), (4, 1), 0), buf28, primals_27, buf32, primals_25, primals_21, reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), primals_14, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = 
rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
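The fused kernels triton_poi_fused__softmax_div_masked_fill_1 and _2 above implement the masked scaled-dot-product softmax in two passes: the first computes the per-row max and exp-sum, the second normalizes. The hard-coded scale tmp4 = 1.0 is 1/sqrt(size_per_head), which degenerates to 1 because model_dim == num_head == 4 gives size_per_head = 1; -1000000000.0 is the mask fill value from the eager source. A minimal eager sketch of the same computation, with shapes assumed from get_init_inputs():

import torch

q = torch.rand(4, 4, 4, 1)  # (batch, head, seq, size_per_head); d == 1 here
k = torch.rand(4, 4, 1, 4)
mask = torch.rand(4, 4, 4, 4)
score = torch.einsum('abcd, abde -> abce', q, k) / 1.0  # 1.0 == sqrt(d)
score = score.masked_fill(mask == 0, -1000000000.0)
attn = torch.softmax(score, dim=-1)
assert torch.allclose(attn.sum(-1), torch.ones(4, 4, 4))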
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def __init__(self): super(ScaledDotProductAttention, self).__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super(MultiHeadAttention, self).__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) class FeedForward(nn.Module): def __init__(self, model_dim, hidden_dim, drop_prob): super(FeedForward, self).__init__() self.model_dim = model_dim self.hidden_dim = hidden_dim self.drop_prob = drop_prob self.linearlayer1 = nn.Linear(model_dim, hidden_dim) self.linearlayer2 = nn.Linear(hidden_dim, model_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(drop_prob) def forward(self, tensor): tensor = self.dropout(self.relu(self.linearlayer1(tensor))) return self.linearlayer2(tensor) class DecoderLayer(nn.Module): def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head, drop_prob): super(DecoderLayer, self).__init__() self.self_attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization1 = nn.LayerNorm(model_dim) self.dropout1 = nn.Dropout(drop_prob) self.enc_dec_attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization2 = nn.LayerNorm(model_dim) self.dropout2 = nn.Dropout(drop_prob) self.ffn = FeedForward(model_dim, hidden_dim, drop_prob) self.normalization3 = nn.LayerNorm(model_dim) self.dropout3 = nn.Dropout(drop_prob) def forward(self, dec_tensor, enc_tensor, source_mask, target_mask): residual = dec_tensor tensor = self.self_attention(query=dec_tensor, key=dec_tensor, value=dec_tensor, mask=target_mask) tensor = self.dropout1(self.normalization1(tensor + 
residual)) if enc_tensor is not None: residual = tensor tensor = self.enc_dec_attention(query=tensor, key=enc_tensor, value=enc_tensor, mask=source_mask) tensor = self.dropout2(self.normalization2(tensor + residual)) residual = tensor tensor = self.ffn(tensor) tensor = self.dropout3(self.normalization3(tensor + residual)) return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4, 'key_dim': 4, 'value_dim': 4, 'hidden_dim': 4, 'num_head': 4, 'drop_prob': 0.5}]
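A minimal smoke test for the eager DecoderLayer above, using the shapes from get_inputs() and get_init_inputs(); .eval() is applied so dropout does not perturb the output (an illustrative sketch, not part of the extracted repository):

import torch

layer = DecoderLayer(model_dim=4, key_dim=4, value_dim=4, hidden_dim=4,
                     num_head=4, drop_prob=0.5).eval()
dec, enc, source_mask, target_mask = get_inputs()
out = layer(dec, enc, source_mask, target_mask)
assert out.shape == (4, 4, 4)  # (batch, seq_len, model_dim)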
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp15 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp10 = tmp1 * tmp9 tmp11 = tmp10 * tmp4 tmp12 = tl.where(tmp8, tmp6, tmp11) tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tmp16 * tmp4 tmp18 = tl.where(tmp14, tmp6, tmp17) tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tmp22 * tmp4 tmp24 = tl.where(tmp20, tmp6, tmp23) tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = tl_math.exp(tmp26) tmp28 = tmp12 - tmp25 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp18 - tmp25 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp24 - tmp25 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x2, tmp25, xmask) tl.store(out_ptr1 + x2, tmp36, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 
tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 
= tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 4), (4, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4, 4), (4, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_21, (4, 4), (4, 1)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4,), (1,)) assert_size_stride(primals_25, (4, 4), (4, 1)) assert_size_stride(primals_26, (4,), (1,)) assert_size_stride(primals_27, (4, 4), (4, 1)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4,), (1,)) assert_size_stride(primals_30, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = 
empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_8, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf3, buf0, buf1, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf3, buf0, buf1, buf4, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf7) buf8 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0) del buf4 extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_10 buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf8, primals_1, buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](buf8, primals_1, buf9, buf10, primals_11, primals_12, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_15 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_16 del primals_17 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_19, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14) del primals_18 del primals_19 buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_eq_0[grid(256)](primals_20, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_20 buf16 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf17 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf15, buf12, buf13, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf15, buf12, buf13, buf16, buf17, buf18, 256, XBLOCK=128, num_warps=4, num_stages=1) buf19 = reinterpret_tensor(buf17, (16, 4, 1), (4, 1, 1), 0) del buf17 
extern_kernels.bmm(reinterpret_tensor(buf18, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 1), 0), out=buf19) buf20 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0) del buf16 extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (4, 4), (1, 4), 0), out=buf20) buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 4, 1), 0) del buf20 triton_poi_fused_add_5[grid(64)](buf21, primals_22, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_22 buf22 = buf9 del buf9 buf23 = buf10 del buf10 triton_poi_fused_native_layer_norm_6[grid(16)](buf21, buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_7[grid(64)](buf21, buf22, buf23, primals_23, primals_24, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_24 buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor(primals_25, (4, 4), (1, 4), 0), out=buf25) buf26 = reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0) del buf25 buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_8[grid(64)](buf26, primals_26, buf32, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_26 buf27 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 4), (1, 4), 0), out=buf27) buf28 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0) del buf27 triton_poi_fused_add_5[grid(64)](buf28, primals_28, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_28 buf29 = buf23 del buf23 buf30 = buf22 del buf22 triton_poi_fused_native_layer_norm_6[grid(16)](buf28, buf29, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1) buf31 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_7[grid(64)](buf28, buf29, buf30, primals_29, primals_30, buf31, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf29 del buf30 del primals_30 return (buf31, primals_1, primals_11, primals_23, primals_29, buf0, buf1, buf3, buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(primals_13, (16, 4), (4, 1), 0), buf13, buf15, buf18, reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf21, reinterpret_tensor(buf24, (16, 4), (4, 1), 0), reinterpret_tensor( buf26, (16, 4), (4, 1), 0), buf28, primals_27, buf32, primals_25, primals_21, reinterpret_tensor(buf14, (16, 1, 4), (4, 1, 1), 0), primals_14, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0)) class ScaledDotProductAttention(nn.Module): def __init__(self): super(ScaledDotProductAttention, self).__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, 
key_dim, value_dim, num_head): super(MultiHeadAttention, self).__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) class FeedForward(nn.Module): def __init__(self, model_dim, hidden_dim, drop_prob): super(FeedForward, self).__init__() self.model_dim = model_dim self.hidden_dim = hidden_dim self.drop_prob = drop_prob self.linearlayer1 = nn.Linear(model_dim, hidden_dim) self.linearlayer2 = nn.Linear(hidden_dim, model_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(drop_prob) def forward(self, tensor): tensor = self.dropout(self.relu(self.linearlayer1(tensor))) return self.linearlayer2(tensor) class DecoderLayerNew(nn.Module): def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head, drop_prob): super(DecoderLayerNew, self).__init__() self.self_attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization1 = nn.LayerNorm(model_dim) self.dropout1 = nn.Dropout(drop_prob) self.enc_dec_attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization2 = nn.LayerNorm(model_dim) self.dropout2 = nn.Dropout(drop_prob) self.ffn = FeedForward(model_dim, hidden_dim, drop_prob) self.normalization3 = nn.LayerNorm(model_dim) self.dropout3 = nn.Dropout(drop_prob) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.self_attention.Wq.weight primals_3 = self.self_attention.Wq.bias primals_4 = self.self_attention.Wk.weight primals_5 = self.self_attention.Wk.bias primals_6 = self.self_attention.Wv.weight primals_7 = self.self_attention.Wv.bias primals_9 = self.self_attention.Wo.weight primals_10 = self.self_attention.Wo.bias primals_11 = self.normalization1.weight primals_12 = self.normalization1.bias primals_14 = self.enc_dec_attention.Wq.weight primals_15 = self.enc_dec_attention.Wq.bias primals_16 = self.enc_dec_attention.Wk.weight primals_17 = self.enc_dec_attention.Wk.bias primals_18 = self.enc_dec_attention.Wv.weight primals_19 = self.enc_dec_attention.Wv.bias primals_21 = self.enc_dec_attention.Wo.weight primals_22 = self.enc_dec_attention.Wo.bias primals_23 = self.normalization2.weight primals_24 = self.normalization2.bias primals_25 = self.ffn.linearlayer1.weight primals_26 = self.ffn.linearlayer1.bias primals_27 = self.ffn.linearlayer2.weight primals_28 = self.ffn.linearlayer2.bias primals_29 = self.normalization3.weight primals_30 = self.normalization3.bias primals_1 = 
input_0 primals_13 = input_1 primals_8 = input_2 primals_20 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return output[0]
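Eager reference for the two-kernel LayerNorm split used above: triton_poi_fused_native_layer_norm_6 emits the per-row mean and rsqrt(var + 1e-5) (biased variance over the last dimension of size 4), and _7 applies them together with the affine gamma/beta. A sketch with placeholder values:

import torch

x = torch.rand(4, 4, 4)
gamma, beta = torch.ones(4), torch.zeros(4)  # placeholders for the learned affine
mean = x.mean(-1, keepdim=True)
inv_std = torch.rsqrt(x.var(-1, unbiased=False, keepdim=True) + 1e-05)
out = (x - mean) * inv_std * gamma + beta
assert torch.allclose(out, torch.nn.functional.layer_norm(x, (4,)), atol=1e-06)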
SeungoneKim/Transformer_implementation
DecoderLayer
false
1082
[ "Apache-2.0" ]
0
a52bf552eb645fc9bfb812cc26842fc147d6c008
https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def __init__(self): super().__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super().__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) class FeedForward(nn.Module): def __init__(self, model_dim, hidden_dim, drop_prob): super().__init__() self.model_dim = model_dim self.hidden_dim = hidden_dim self.drop_prob = drop_prob self.linearlayer1 = nn.Linear(model_dim, hidden_dim) self.linearlayer2 = nn.Linear(hidden_dim, model_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(drop_prob) def forward(self, tensor): tensor = self.dropout(self.relu(self.linearlayer1(tensor))) return self.linearlayer2(tensor) class Model(nn.Module): def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head, drop_prob): super().__init__() self.self_attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization1 = nn.LayerNorm(model_dim) self.dropout1 = nn.Dropout(drop_prob) self.enc_dec_attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization2 = nn.LayerNorm(model_dim) self.dropout2 = nn.Dropout(drop_prob) self.ffn = FeedForward(model_dim, hidden_dim, drop_prob) self.normalization3 = nn.LayerNorm(model_dim) self.dropout3 = nn.Dropout(drop_prob) def forward(self, dec_tensor, enc_tensor, source_mask, target_mask): residual = dec_tensor tensor = self.self_attention(query=dec_tensor, key=dec_te # ... truncated (>4000 chars) for memory efficiency
TransitionUp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/re/creleqpucbzvzrso3whbekvyjzfafblr33ygekztpuugjz5zfqbd.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # out_2 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) % 8 x0 = xindex % 4 x1 = (xindex // 4) % 4 x3 = (xindex // 128) x4 = xindex % 16 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, 
tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (20 + x0 + (9*x1) + (81*x2) + (324*x3)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 8, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x4 + (16*((-4) + x2)) + (64*x3)), tmp10 & xmask, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 512, grid=grid(512), stream=stream0) del buf0 del primals_2 del primals_4 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
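Why call() asserts buf0 has shape (4, 4, 9, 9): the standard transposed-convolution size formula, H_out = (H_in - 1) * stride - 2 * padding + kernel_size (dilation 1, output_padding 0), applied to this entry's shapes:

h_out = (4 - 1) * 2 - 2 * 0 + 3
assert h_out == 9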
import torch import torch.nn as nn def center_crop(layer, max_height, max_width): _, _, h, w = layer.size() xy1 = (w - max_width) // 2 xy2 = (h - max_height) // 2 return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width] class TransitionUp(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.convTrans = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0, bias=True) def forward(self, x, skip): out = self.convTrans(x) out = center_crop(out, skip.size(2), skip.size(3)) out = torch.cat([out, skip], 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
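Smoke test for the eager TransitionUp above, with the shapes from get_inputs(): the 9x9 upsampled map is center-cropped back to the 4x4 skip resolution and concatenated along channels (illustrative only):

import torch

up = TransitionUp(in_channels=4, out_channels=4)
x, skip = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert up.convTrans(x).shape == (4, 4, 9, 9)
assert up(x, skip).shape == (4, 8, 4, 4)  # 4 cropped + 4 skip channels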
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 % 8 x0 = xindex % 4 x1 = xindex // 4 % 4 x3 = xindex // 128 x4 = xindex % 16 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (20 + x0 + 9 * x1 + 81 * x2 + 324 * x3), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp13 = tl.load(in_ptr2 + (x4 + 16 * (-4 + x2) + 64 * x3), tmp10 & xmask, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x5, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_4 return buf1, primals_1, primals_3 def center_crop(layer, max_height, max_width): _, _, h, w = layer.size() xy1 = (w - max_width) // 2 xy2 = (h - max_height) // 2 return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width] class TransitionUpNew(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.convTrans = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0, bias=True) def forward(self, input_0, input_1): primals_1 = self.convTrans.weight primals_2 = self.convTrans.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
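A note on the fused cat kernel: the constant 20 in the load index 20 + x0 + 9 * x1 + 81 * x2 + 324 * x3 is the flattened center-crop offset into the (4, 4, 9, 9) transposed-conv output, i.e. the crop starts at row 2, column 2 of each row-major 9x9 map:

row0 = col0 = (9 - 4) // 2  # center_crop start for both axes
assert row0 * 9 + col0 == 20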
Sreehari-S/Tiramisu_DigestPath
TransitionUp
false
1083
[ "Apache-2.0" ]
0
a884ee911bc60ce997996e0ec2e6036600ffcffa
https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa
import torch import torch.nn as nn def center_crop(layer, max_height, max_width): _, _, h, w = layer.size() xy1 = (w - max_width) // 2 xy2 = (h - max_height) // 2 return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width] class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.convTrans = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=0, bias=True) def forward(self, x, skip): out = self.convTrans(x) out = center_crop(out, skip.size(2), skip.size(3)) out = torch.cat([out, skip], 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
ConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/of/cofwklo5nuhcnwyox2iqrwvifqkquaz5nm5gngbpzzb6xu4usmqd.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.23570226039551584), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.23570226039551584 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4b/c4biasj4wzw4qmo6u767lcafgh6e6kct7c433onys3pjjjwfxxqj.py # Topologically Sorted Source Nodes: [x, pow_1, mean, add, sqrt, x_1, mul_1], Original ATen: [aten.leaky_relu, aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mean => mean # mul_1 => mul_2 # pow_1 => pow_1 # sqrt => sqrt # x => gt, mul_1, where # x_1 => div # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%where, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = 
call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where, %sqrt), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 0.23570226039551584), kwargs = {}) triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp6 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp10 = tmp9 * tmp9 tmp12 = tmp11 > tmp1 tmp13 = tmp11 * tmp3 tmp14 = tl.where(tmp12, tmp11, tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = tl.where(tmp18, tmp17, tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp24 = tmp23 > tmp1 tmp25 = tmp23 * tmp3 tmp26 = tl.where(tmp24, tmp23, tmp25) tmp27 = tmp26 * tmp26 tmp28 = tmp22 + tmp27 tmp29 = 4.0 tmp30 = tmp28 / tmp29 tmp31 = 1e-08 tmp32 = tmp30 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = tmp5 / tmp33 tmp35 = 0.23570226039551584 tmp36 = tmp34 * tmp35 tl.store(in_out_ptr0 + (x3), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7v/c7vm65ktpe4n23v24el4vlbfc7rafxngxnpi7ekdsaph6dnzyef4.py # Topologically Sorted Source Nodes: [x_2, pow_2, mean_1, add_1, sqrt_1, x_3], Original ATen: [aten.leaky_relu, 
aten.pow, aten.mean, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_1 => add_1 # mean_1 => mean_1 # pow_2 => pow_2 # sqrt_1 => sqrt_1 # x_2 => gt_1, mul_3, where_1 # x_3 => div_1 # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_3), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%where_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [1], True), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%where_1, %sqrt_1), kwargs = {}) triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3 = async_compile.triton('triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp6 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp10 = tmp9 * tmp9 tmp12 = tmp11 > tmp1 
tmp13 = tmp11 * tmp3 tmp14 = tl.where(tmp12, tmp11, tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = tl.where(tmp18, tmp17, tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp24 = tmp23 > tmp1 tmp25 = tmp23 * tmp3 tmp26 = tl.where(tmp24, tmp23, tmp25) tmp27 = tmp26 * tmp26 tmp28 = tmp22 + tmp27 tmp29 = 4.0 tmp30 = tmp28 / tmp29 tmp31 = 1e-08 tmp32 = tmp30 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = tmp5 / tmp33 tl.store(out_ptr0 + (x3), tmp34, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x, pow_1, mean, add, sqrt, x_1, mul_1], Original ATen: [aten.leaky_relu, aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2.run(buf4, buf2, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf6, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, pow_2, mean_1, add_1, sqrt_1, x_3], Original ATen: [aten.leaky_relu, aten.pow, aten.mean, aten.add, aten.sqrt, aten.div] triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0) return (buf7, primals_2, primals_4, buf0, buf2, buf4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), 
device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
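The fused kernel triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3 above folds the LeakyReLU and the channel-wise PixelNorm into a single pass. As a minimal eager-mode sketch of the same math (illustrative only, assuming the 4-channel NCHW shapes used throughout this row; not part of the generated module):

import torch
import torch.nn.functional as F

def leaky_pixelnorm(x, slope=0.2, eps=1e-08):
    # LeakyReLU, then divide by the RMS over the channel dimension --
    # the tmp5 / libdevice.sqrt(tmp32) computation in the fused kernel.
    t = F.leaky_relu(x, slope)
    return t / torch.sqrt(torch.mean(t ** 2, dim=1, keepdim=True) + eps)

x = torch.randn(4, 4, 4, 4)
out = leaky_pixelnorm(x)  # matches buf7 produced by the kernel, up to fp error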
import torch
import torch.nn as nn
import torch.utils.data


class WSConv2d(nn.Module):
    """
    Weight scaled Conv2d (Equalized Learning Rate).
    Note that the input is multiplied by the scale rather than the weights
    being changed; the result is the same.
    Inspired by:
    https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, gain=2):
        super(WSConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding)
        self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.conv.bias)

    def forward(self, x):
        return self.conv(x * self.scale)


class PixelNorm(nn.Module):

    def __init__(self):
        super(PixelNorm, self).__init__()
        self.epsilon = 1e-08

    def forward(self, x):
        return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) +
                              self.epsilon)


class ConvBlock(nn.Module):

    def __init__(self, in_channels, out_channels, use_pixelnorm=True):
        super(ConvBlock, self).__init__()
        self.use_pn = use_pixelnorm
        self.conv1 = WSConv2d(in_channels, out_channels)
        self.conv2 = WSConv2d(out_channels, out_channels)
        self.leaky = nn.LeakyReLU(0.2)
        self.pn = PixelNorm()

    def forward(self, x):
        x = self.leaky(self.conv1(x))
        x = self.pn(x) if self.use_pn else x
        x = self.leaky(self.conv2(x))
        x = self.pn(x) if self.use_pn else x
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.23570226039551584 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp10 = tmp9 * tmp9 tmp12 = tmp11 > tmp1 tmp13 = tmp11 * tmp3 tmp14 = tl.where(tmp12, tmp11, tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = tl.where(tmp18, tmp17, tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp24 = tmp23 > tmp1 tmp25 = tmp23 * tmp3 tmp26 = tl.where(tmp24, tmp23, tmp25) tmp27 = tmp26 * tmp26 tmp28 = tmp22 + tmp27 tmp29 = 4.0 tmp30 = tmp28 / tmp29 tmp31 = 1e-08 tmp32 = tmp30 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = tmp5 / tmp33 tmp35 = 0.23570226039551584 tmp36 = tmp34 * tmp35 tl.store(in_out_ptr0 + x3, tmp36, xmask) @triton.jit def triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp10 = tmp9 * tmp9 tmp12 = tmp11 > tmp1 tmp13 = tmp11 * tmp3 tmp14 = 
tl.where(tmp12, tmp11, tmp13) tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp18 = tmp17 > tmp1 tmp19 = tmp17 * tmp3 tmp20 = tl.where(tmp18, tmp17, tmp19) tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp24 = tmp23 > tmp1 tmp25 = tmp23 * tmp3 tmp26 = tl.where(tmp24, tmp23, tmp25) tmp27 = tmp26 * tmp26 tmp28 = tmp22 + tmp27 tmp29 = 4.0 tmp30 = tmp28 / tmp29 tmp31 = 1e-08 tmp32 = tmp30 + tmp31 tmp33 = libdevice.sqrt(tmp32) tmp34 = tmp5 / tmp33 tl.store(out_ptr0 + x3, tmp34, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = buf3 del buf3 triton_poi_fused_add_div_leaky_relu_mean_mul_pow_sqrt_2[grid(256)](buf4 , buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_1[grid(256)](buf6, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_leaky_relu_mean_pow_sqrt_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf7, primals_2, primals_4, buf0, buf2, buf4, buf6 class WSConv2d(nn.Module): """ Weight scaled Conv2d (Equalized Learning Rate) Note that input is multiplied rather than changing weights this will have the same result. 
Inspired by: https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, gain=2): super(WSConv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding) self.scale = (gain / self.conv.weight[0].numel()) ** 0.5 nn.init.normal_(self.conv.weight) nn.init.zeros_(self.conv.bias) def forward(self, x): return self.conv(x * self.scale) class PixelNorm(nn.Module): def __init__(self): super(PixelNorm, self).__init__() self.epsilon = 1e-08 def forward(self, x): return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + self.epsilon) class ConvBlockNew(nn.Module): def __init__(self, in_channels, out_channels, use_pixelnorm=True): super(ConvBlockNew, self).__init__() self.use_pn = use_pixelnorm self.conv1 = WSConv2d(in_channels, out_channels) self.conv2 = WSConv2d(out_channels, out_channels) self.leaky = nn.LeakyReLU(0.2) self.pn = PixelNorm() def forward(self, input_0): primals_2 = self.conv1.conv.weight primals_3 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
SongsLearning/Machine-Learning-Collection
ConvBlock
false
1084
[ "MIT" ]
0
a8dff83969f67d37f70a89db06b851057d2da539
https://github.com/SongsLearning/Machine-Learning-Collection/tree/a8dff83969f67d37f70a89db06b851057d2da539
import torch
import torch.nn as nn
import torch.utils.data


class WSConv2d(nn.Module):
    """
    Weight scaled Conv2d (Equalized Learning Rate).
    Note that the input is multiplied by the scale rather than the weights
    being changed; the result is the same.
    Inspired by:
    https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, gain=2):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding)
        self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.conv.bias)

    def forward(self, x):
        return self.conv(x * self.scale)


class PixelNorm(nn.Module):

    def __init__(self):
        super().__init__()
        self.epsilon = 1e-08

    def forward(self, x):
        return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) +
                              self.epsilon)


class Model(nn.Module):

    def __init__(self, in_channels, out_channels, use_pixelnorm=True):
        super().__init__()
        self.use_pn = use_pixelnorm
        self.conv1 = WSConv2d(in_channels, out_channels)
        self.conv2 = WSConv2d(out_channels, out_channels)
        self.leaky = nn.LeakyReLU(0.2)
        self.pn = PixelNorm()

    def forward(self, x):
        x = self.leaky(self.conv1(x))
        x = self.pn(x) if self.use_pn else x
        x = self.leaky(self.conv2(x))
        x = self.pn(x) if self.use_pn else x
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
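For orientation, a hypothetical smoke test for the eager ConvBlock defined in this row (shapes follow get_inputs()/get_init_inputs(); the expected shape is an assumption derived from those shapes, not repo output):

import torch

block = ConvBlock(in_channels=4, out_channels=4)  # class from the field above
x = torch.rand(4, 4, 4, 4)
y = block(x)
# Two 3x3 convs with stride 1 and padding 1 preserve the spatial size.
assert y.shape == (4, 4, 4, 4)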
FcCat
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %view_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + 
(64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, buf0, buf1, 512, grid=grid(512), stream=stream0) del buf0 return (buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class FcCat(nn.Module):

    def __init__(self, nIn, nOut):
        super(FcCat, self).__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        out = torch.cat((x, self.fc(x)), 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nIn': 4, 'nOut': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class FcCatNew(nn.Module): def __init__(self, nIn, nOut): super(FcCatNew, self).__init__() self.fc = nn.Linear(nIn, nOut, bias=False) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
Sreehari-S/Tiramisu_DigestPath
FcCat
false
1086
[ "Apache-2.0" ]
0
a884ee911bc60ce997996e0ec2e6036600ffcffa
https://github.com/Sreehari-S/Tiramisu_DigestPath/tree/a884ee911bc60ce997996e0ec2e6036600ffcffa
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, nIn, nOut):
        super().__init__()
        self.fc = nn.Linear(nIn, nOut, bias=False)

    def forward(self, x):
        out = torch.cat((x, self.fc(x)), 1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
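A small eager sketch of what triton_poi_fused_cat_0 materializes: the x1 < 4 branch reads the raw input and the other branch reads fc(x) at offset -4 + x1, which is exactly concatenation along dim 1 (sketch only, assuming the 4x4x4x4 input from get_inputs()):

import torch
import torch.nn as nn

fc = nn.Linear(4, 4, bias=False)
x = torch.rand(4, 4, 4, 4)
out = torch.cat((x, fc(x)), 1)
assert out.shape == (4, 8, 4, 4)
assert torch.equal(out[:, :4], x)  # channels 0-3 are the pass-through branch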
WSConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/of/cofwklo5nuhcnwyox2iqrwvifqkquaz5nm5gngbpzzb6xu4usmqd.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.23570226039551584), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.23570226039551584 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, 
repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class WSConv2d(nn.Module):
    """
    Weight scaled Conv2d (Equalized Learning Rate).
    Note that the input is multiplied by the scale rather than the weights
    being changed; the result is the same.
    Inspired by:
    https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, gain=2):
        super(WSConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding)
        self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.conv.bias)

    def forward(self, x):
        return self.conv(x * self.scale)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.23570226039551584 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class WSConv2dNew(nn.Module): """ Weight scaled Conv2d (Equalized Learning Rate) Note that input is multiplied rather than changing weights this will have the same result. Inspired by: https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, gain=2): super(WSConv2dNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding) self.scale = (gain / self.conv.weight[0].numel()) ** 0.5 nn.init.normal_(self.conv.weight) nn.init.zeros_(self.conv.bias) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
SongsLearning/Machine-Learning-Collection
WSConv2d
false
1087
[ "MIT" ]
0
a8dff83969f67d37f70a89db06b851057d2da539
https://github.com/SongsLearning/Machine-Learning-Collection/tree/a8dff83969f67d37f70a89db06b851057d2da539
import torch
import torch.nn as nn
import torch.utils.data


class Model(nn.Module):
    """
    Weight scaled Conv2d (Equalized Learning Rate).
    Note that the input is multiplied by the scale rather than the weights
    being changed; the result is the same.
    Inspired by:
    https://github.com/nvnbny/progressive_growing_of_gans/blob/master/modelUtils.py
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, gain=2):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, padding)
        self.scale = (gain / self.conv.weight[0].numel()) ** 0.5
        nn.init.normal_(self.conv.weight)
        nn.init.zeros_(self.conv.bias)

    def forward(self, x):
        return self.conv(x * self.scale)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
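The constant 0.23570226039551584 baked into triton_poi_fused_mul_0 is not magic: it is WSConv2d's equalized-learning-rate scale for gain=2 and a 4-input-channel 3x3 kernel, so fan_in = conv.weight[0].numel() = 4 * 3 * 3 = 36. A quick check (plain arithmetic on the shapes above):

# scale = (gain / fan_in) ** 0.5 with gain=2 and fan_in=36
scale = (2 / (4 * 3 * 3)) ** 0.5
assert abs(scale - 0.23570226039551584) < 1e-12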
Standardscaler
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ft/cft4ssfca465zguug7muc7b6b3q6agysmujv6njryw7rnc62phca.py # Topologically Sorted Source Nodes: [std_mean, sub, total], Original ATen: [aten.std_mean, aten.sub, aten.div] # Source node to ATen node mapping: # std_mean => sqrt, var_mean # sub => sub # total => div # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%arg0_1,), kwargs = {correction: 0.0}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %getitem_1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%getitem,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) triton_per_fused_div_std_mean_sub_0 = async_compile.triton('triton_per_fused_div_std_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_std_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_std_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [std_mean, sub, total], Original ATen: [aten.std_mean, aten.sub, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_std_mean_sub_0.run(arg0_1, buf3, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class Standardscaler(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_batch):
        std, mean = torch.std_mean(input_batch.type(torch.float32),
                                   unbiased=False)
        total = (input_batch - mean) / std
        return total


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_std_mean_sub_0(in_ptr0, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = tmp0 - tmp8 tmp15 = 256.0 tmp16 = tmp13 / tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp18, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_std_mean_sub_0[grid(1)](arg0_1, buf3, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf3, class StandardscalerNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Stuksus/StandardScaler_for_pytorch
Standardscaler
false
1088
[ "MIT" ]
0
27da9afd111007f20a615bee9a5a7ac272adb241
https://github.com/Stuksus/StandardScaler_for_pytorch/tree/27da9afd111007f20a615bee9a5a7ac272adb241
import torch


class Model(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_batch):
        std, mean = torch.std_mean(input_batch.type(torch.float32),
                                   unbiased=False)
        total = (input_batch - mean) / std
        return total


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
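triton_per_fused_div_std_mean_sub_0 is a single persistent reduction over all 256 elements: tmp8 is the global mean, tmp16 the biased variance (divide by 256.0, matching unbiased=False), and the store writes the standardized tensor. An eager sketch of the same computation (illustrative only):

import torch

x = torch.rand(4, 4, 4, 4)
std, mean = torch.std_mean(x, unbiased=False)  # biased, as in the kernel
ref = (x - mean) / std
manual = (x - x.mean()) / x.var(unbiased=False).sqrt()
assert torch.allclose(ref, manual)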
FeatureResizer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/66/c666qvr725wwogti7syalhhjsndtv2n5sxzt6zi4wlesyjxocpic.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_1 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, 256, grid=grid(256), stream=stream0) del buf1 del buf2 del primals_5 return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


class FeatureResizer(nn.Module):
    """
    This class takes as input a set of embeddings of dimension C1 and outputs
    a set of embeddings of dimension C2, after a linear transformation,
    dropout and normalization (LN).
    """

    def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
        super().__init__()
        self.do_ln = do_ln
        self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
        self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)

    def forward(self, encoder_features):
        x = self.fc(encoder_features)
        if self.do_ln:
            x = self.layer_norm(x)
        output = self.dropout(x)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_feat_size': 4, 'output_feat_size': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-12 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(256)](buf0, buf1, buf2, primals_4, primals_5, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf2 del primals_5 return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class FeatureResizerNew(nn.Module): """ This class 
takes as input a set of embeddings of dimension C1 and outputs a set of embedding of dimension C2, after a linear transformation, dropout and normalization (LN). """ def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): super().__init__() self.do_ln = do_ln self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = self.layer_norm.weight primals_5 = self.layer_norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Sudhir11292rt/DefVisTR
FeatureResizer
false
1089
[ "Apache-2.0" ]
0
d52b2d88c10c6239de1c1ff851a743c58b708b75
https://github.com/Sudhir11292rt/DefVisTR/tree/d52b2d88c10c6239de1c1ff851a743c58b708b75
import torch
import torch.utils.data
from torch import nn


class Model(nn.Module):
    """
    This class takes as input a set of embeddings of dimension C1 and outputs
    a set of embeddings of dimension C2, after a linear transformation,
    dropout and normalization (LN).
    """

    def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
        super().__init__()
        self.do_ln = do_ln
        self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
        self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)

    def forward(self, encoder_features):
        x = self.fc(encoder_features)
        if self.do_ln:
            x = self.layer_norm(x)
        output = self.dropout(x)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 0.5]
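The generated code above splits nn.LayerNorm into two kernels: kernel 0 produces the per-row mean (tmp8) and rsqrt(var + 1e-12) (tmp23) into buf1/buf2, and kernel 1 applies (x - mean) * rsqrt * weight + bias. A hedged eager sketch of that split, assuming the eps=1e-12 and normalized_shape=4 used in this row:

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
ln = nn.LayerNorm(4, eps=1e-12)
mean = x.mean(dim=-1, keepdim=True)  # kernel 0: tmp8
inv = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-12)  # tmp23
manual = (x - mean) * inv * ln.weight + ln.bias  # kernel 1
assert torch.allclose(manual, ln(x), atol=1e-6)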
UGRNNLRCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/id/cidxuzakshtvnrldbbqkyga2amymtgyw2cvpqv7d2kjchobiepep.py # Topologically Sorted Source Nodes: [pre_comp1, pre_comp2, add_2, z, add_3, c, mul, sub, mul_1, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_2 => add_2 # add_3 => add_3 # c => tanh # mul => mul # mul_1 => mul_1 # new_h => add_4 # pre_comp1 => add # pre_comp2 => add_1 # sub => sub # z => sigmoid # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %view_7), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_7), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_2,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_8), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_5), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 
'*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + (x2), xmask) tmp7 = tl.load(in_ptr2 + (x2), xmask) tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + (x2), tmp5, xmask) tl.store(in_out_ptr1 + (x2), tmp11, xmask) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wComp2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [uComp1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [uComp2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_6, 
out=buf3) del primals_6 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pre_comp1, pre_comp2, add_2, z, add_3, c, mul, sub, mul_1, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf4, buf5, buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_7 del primals_8 return (buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] =
utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class UGRNNLRCell(RNNCell): """ UGRNN LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 3 matrices if not None else creates 2 matrices) uRank = rank of U matrix (creates 3 matrices if not None else creates 2 matrices) UGRNN architecture and compression techniques are found in UGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(W1x_t + U1h_{t-1} + B_g) h_t^ = update_nl(W2x_t + U2h_{t-1} + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further be parameterised into low rank versions by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='UGRNNLR'): super(UGRNNLRCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'UGRNNLR' def forward(self, input, state): if self._wRank is None: wComp1 = torch.matmul(input, self.W1) wComp2 = torch.matmul(input, self.W2) else: wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1) wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2) if self._uRank is None: uComp1 = torch.matmul(state, self.U1) uComp2 = torch.matmul(state, self.U2) else: uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1) uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2) pre_comp1 = wComp1 + uComp1 pre_comp2 = wComp2 + uComp2 z = gen_nonlinearity(pre_comp1 + self.bias_gate, self. _gate_nonlinearity) c = gen_nonlinearity(pre_comp2 + self.bias_update, self.
_update_nonlinearity) new_h = z * state + (1.0 - z) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 2: Vars.extend([self.W1, self.W2]) else: Vars.extend([self.W, self.W1, self.W2]) if self._num_U_matrices == 2: Vars.extend([self.U1, self.U2]) else: Vars.extend([self.U, self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_6, out=buf3) del primals_6 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf4, buf5, buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_7 del primals_8 return buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if 
nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.copy_previous_UW() def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class UGRNNLRCellNew(RNNCell): """ UGRNN LR Cell with Both Full Rank and Low
Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 3 matrices if not None else creates 2 matrices) uRank = rank of U matrix (creates 3 matrices if not None else creates 2 matrices) UGRNN architecture and compression techniques are found in UGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(W1x_t + U1h_{t-1} + B_g) h_t^ = update_nl(W2x_t + U2h_{t-1} + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further be parameterised into low rank versions by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='UGRNNLR'): super(UGRNNLRCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'UGRNNLR' def getVars(self): Vars = [] if self._num_W_matrices == 2: Vars.extend([self.W1, self.W2]) else: Vars.extend([self.W, self.W1, self.W2]) if self._num_U_matrices == 2: Vars.extend([self.U1, self.U2]) else: Vars.extend([self.U, self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) return Vars def forward(self, input_0, input_1): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.U1 primals_6 = self.U2 primals_7 = self.bias_gate primals_8 = self.bias_update primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
ShishirPatil/EdgeML-1
UGRNNLRCell
false
1090
[ "MIT" ]
0
cbba9f8b989e545788427c004eb8450e7e4c1a21
https://github.com/ShishirPatil/EdgeML-1/tree/cbba9f8b989e545788427c004eb8450e7e4c1a21
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super().__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(to # ... truncated (>4000 chars) for memory efficiency
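A note on the UGRNNLRCell record above: the fused kernel triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 collapses the whole UGRNN state update (bias adds, gate sigmoid, candidate tanh, and the convex combination z*h + (1-z)*c) into a single elementwise pass, while the four matrix products stay in extern_kernels.mm. A minimal eager-mode sketch of that same update, written only against the classes defined in this record, is one way to sanity-check the compiled path; the tolerance value is an arbitrary choice, not part of the source.

import torch

cell = UGRNNLRCell(input_size=4, hidden_size=4)  # full rank: wRank and uRank are None
x, h = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)  # shapes from get_inputs()

# Replay the docstring equations with the cell's own parameters.
z = torch.sigmoid(x @ cell.W1 + h @ cell.U1 + cell.bias_gate)   # gate
c = torch.tanh(x @ cell.W2 + h @ cell.U2 + cell.bias_update)    # candidate
expected = z * h + (1.0 - z) * c                                # new hidden state

assert torch.allclose(cell(x, h), expected, atol=1e-6)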
FCLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/dn/cdnhr6ixjduuhci57kobqjnehjrl22mcyjqzuuhvtxxshy437diy.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_1 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FCLayer(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super(FCLayer, self).__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, x): x = self.dropout(x) if self.use_activation: x = self.tanh(x) return self.linear(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class FCLayerNew(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super(FCLayerNew, self).__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
StevenChaoo/R-BERT-DDI
FCLayer
false
1091
[ "MIT" ]
0
6d9666e0bc61397ca942ffad53653690c1e8a899
https://github.com/StevenChaoo/R-BERT-DDI/tree/6d9666e0bc61397ca942ffad53653690c1e8a899
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim, output_dim, dropout_rate=0.0, use_activation=True): super().__init__() self.use_activation = use_activation self.dropout = nn.Dropout(dropout_rate) self.linear = nn.Linear(input_dim, output_dim) self.tanh = nn.Tanh() def forward(self, x): x = self.dropout(x) if self.use_activation: x = self.tanh(x) return self.linear(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
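A note on the FCLayer record above: the compiled call() performs exactly two operations, an elementwise tanh kernel followed by one addmm over the input flattened to (64, 4), reshaped back at the end; dropout disappears from the graph because the default rate is 0.0. A short eager-mode sketch of the same computation, using only the FCLayer class from this record:

import torch

layer = FCLayer(input_dim=4, output_dim=4)  # dropout_rate=0.0, use_activation=True
layer.eval()  # dropout is an identity at rate 0.0 in any case

x = torch.rand(4, 4, 4, 4)  # shape from get_inputs()
expected = layer.linear(torch.tanh(x))
assert torch.allclose(layer(x), expected)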
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] # Source node to ATen node mapping: # q => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') 
tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_attn => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source 
node to ATen node mapping: # p_attn => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [k], 
Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf3, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf4, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf8, primals_8, 64, grid=grid(64), stream=stream0) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf11, primals_10, 64, grid=grid(64), stream=stream0) del primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, 
), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import numpy as np def convert_pad_shape(pad_shape): """Reverse, then flatten a list of lists.""" l = pad_shape[::-1] pad_shape = [item for sublist in l for item in sublist] return pad_shape class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): """ Returns number of trainable parameters of the module. """ num_params = 0 for name, param in self.named_parameters(): if param.requires_grad: num_params += np.prod(param.detach().cpu().numpy().shape) return num_params def relocate_input(self, x: 'list'): """ Relocates provided tensors to the same device set for the module. """ device = next(self.parameters()).device for i in range(len(x)): if isinstance(x[i], torch.Tensor) and x[i].device != device: x[i] = x[i].to(device) return x class MultiHeadAttention(BaseModule): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init =False): super(MultiHeadAttention, self).__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = torch.nn.Conv1d(channels, channels, 1) self.conv_k = torch.nn.Conv1d(channels, channels, 1) self.conv_v = torch.nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = torch.nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = torch.nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = torch.nn.Conv1d(channels, out_channels, 1) self.drop = torch.nn.Dropout(p_dropout) torch.nn.init.xavier_uniform_(self.conv_q.weight) torch.nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) torch.nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.'
scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) p_attn = torch.nn.functional.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = torch.nn.functional.pad( relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): batch, heads, length, _ = x.size() x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [ 0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): batch, heads, length, _ = x.size() x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [ 0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
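A note on the helpers above: convert_pad_shape turns per-dimension [before, after] pairs, listed from the outermost dimension inward, into the flat last-dimension-first ordering that torch.nn.functional.pad expects, which is why the relative-position methods wrap every pad spec in it. A tiny illustration; the concrete values are illustrative only, not from the source:

import torch

pads = [[0, 0], [0, 0], [0, 1]]  # pad only the last dim, by 1 on the right
assert convert_pad_shape(pads) == [0, 1, 0, 0, 0, 0]

x = torch.zeros(2, 3, 4)
y = torch.nn.functional.pad(x, convert_pad_shape(pads))
assert y.shape == (2, 3, 5)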
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = extern_kernels.convolution(primals_6, 
primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf3, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = buf1 del buf1 triton_poi_fused_convolution_0[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = buf2 del buf2 triton_poi_fused_convolution_0[grid(64)](buf8, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_0[grid(64)](buf11, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) def convert_pad_shape(pad_shape): """Reverse, then flatten a list of lists.""" l = pad_shape[::-1] pad_shape = [item for sublist in l for item in sublist] return pad_shape class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): """ Returns number of trainable parameters of the module. """ num_params = 0 for name, param in self.named_parameters(): if param.requires_grad: num_params += np.prod(param.detach().cpu().numpy().shape) return num_params def relocate_input(self, x: 'list'): """ Relocates provided tensors to the same device set for the module. 
""" device = next(self.parameters()).device for i in range(len(x)): if isinstance(x[i], torch.Tensor) and x[i].device != device: x[i] = x[i] return x class MultiHeadAttentionNew(BaseModule): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init =False): super(MultiHeadAttentionNew, self).__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = torch.nn.Conv1d(channels, channels, 1) self.conv_k = torch.nn.Conv1d(channels, channels, 1) self.conv_v = torch.nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = torch.nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = torch.nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = torch.nn.Conv1d(channels, out_channels, 1) self.drop = torch.nn.Dropout(p_dropout) torch.nn.init.xavier_uniform_(self.conv_q.weight) torch.nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) torch.nn.init.xavier_uniform_(self.conv_v.weight) def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) p_attn = torch.nn.functional.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. 
emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = torch.nn.functional.pad( relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): batch, heads, length, _ = x.size() x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [ 0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): batch, heads, length, _ = x.size() x = torch.nn.functional.pad(x, convert_pad_shape([[0, 0], [0, 0], [ 0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = torch.nn.functional.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def forward(self, input_0, input_1): primals_1 = self.conv_q.weight primals_2 = self.conv_q.bias primals_4 = self.conv_k.weight primals_5 = self.conv_k.bias primals_7 = self.conv_v.weight primals_8 = self.conv_v.bias primals_9 = self.conv_o.weight primals_10 = self.conv_o.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
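An eager reference (my sketch, not generated code) for the pair of Triton kernels above: triton_poi_fused__softmax_1 computes exp(x - rowmax) over each row of four scores and triton_poi_fused__softmax_2 divides by the row sum; chained together they equal a numerically stable softmax over the last dimension.

import torch

def softmax_two_pass(scores):
    shifted = (scores - scores.amax(dim=-1, keepdim=True)).exp()  # kernel 1
    return shifted / shifted.sum(dim=-1, keepdim=True)            # kernel 2

s = torch.randn(4, 4, 4, 4)
assert torch.allclose(softmax_two_pass(s), torch.softmax(s, dim=-1), atol=1e-06)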
Sobsz/uberduck-ml-dev
MultiHeadAttention
false
1092
[ "Apache-2.0" ]
0
f099238f6f2e3f600d72d89dea3c883c59d91387
https://github.com/Sobsz/uberduck-ml-dev/tree/f099238f6f2e3f600d72d89dea3c883c59d91387
import math
import torch
import numpy as np


def convert_pad_shape(pad_shape):
    """Reverse, then flatten a list of lists."""
    l = pad_shape[::-1]
    pad_shape = [item for sublist in l for item in sublist]
    return pad_shape


class BaseModule(torch.nn.Module):

    def __init__(self):
        super().__init__()

    @property
    def nparams(self):
        """
        Returns number of trainable parameters of the module.
        """
        num_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                num_params += np.prod(param.detach().cpu().numpy().shape)
        return num_params

    def relocate_input(self, x: 'list'):
        """
        Relocates provided tensors to the same device set for the module.
        """
        device = next(self.parameters()).device
        for i in range(len(x)):
            if isinstance(x[i], torch.Tensor) and x[i].device != device:
                x[i] = x[i].to(device)
        return x


class Model(BaseModule):

    def __init__(self, channels, out_channels, n_heads, window_size=None,
        heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init
        =False):
        super().__init__()
        assert channels % n_heads == 0
        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.window_size = window_size
        self.heads_share = heads_share
        self.proximal_bias = proximal_bias
        self.p_dropout = p_dropout
        self.attn = None
        self.k_channels = channels // n_heads
        self.conv_q = torch.nn.Conv1d(channels, channels, 1)
        self.conv_k = torch.nn.Conv1d(channels, channels, 1)
        self.conv_v = torch.nn.Conv1d(channels, channels, 1)
        if window_size is not None:
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels ** -0.5
            self.emb_rel_k = torch.nn.Parameter(torch.randn(n_heads_rel,
                window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = torch.nn.Parameter(torch.randn(n_heads_rel,
                window_size * 2 + 1, self.k_channels) * rel_stddev)
        self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
        self.drop = torch.nn.Dropout(p_dropout)
        torch.nn.init.xavier_uniform_(self.conv_q.weight)
        torch.nn.init.xavier_uniform_(self.conv_k.weight)
        if proximal_init:
            self.conv_k.weight.data.copy_(self.conv_q.weight.data)
            self.conv_k.bias.data.copy_(self.conv_q.bias.data)
        torch.nn.init.xavier_uniform_(self.conv_v.weight)

    def forward(self, x, c, attn_mask=None):
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)
        x, self.attn = self.attention(q, k, v, mask=attn_mask)
        x = self.conv_o(x)
        return x

    def attention(self, query, key, value, mask=None):
        b, d, t_s, t_t = *key.size(), query.size(2)
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(
            2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(
            2, 3)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self
            .k_channels)
        if self.window_size is not None:
            assert t_s == t_t, 'Relative attention is only available for self-attention.'
            key_relative_embeddings = self._get_relative_embeddings(self.
                emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(query,
                key_relative_embeddings)
            rel_logits = self._relative_position_to_absolute_position(
                rel_logits)
            scores_local = rel_logits / math.sqrt(self.k_channels)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, 'Proximal bias is only available fo
# ... truncated (>4000 chars) for memory efficiency
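A quick worked check (toy length of 4; variable names are mine) of the _attention_bias_proximal helper defined above: the additive bias is exactly zero on the diagonal and grows more negative with the distance |i - j|.

import torch

r = torch.arange(4, dtype=torch.float32)
bias = -torch.log1p(torch.abs(r.unsqueeze(0) - r.unsqueeze(1)))  # shape (4, 4)
assert bias.diagonal().eq(0).all()  # -log1p(0) == 0 on the diagonal
assert bias[0, 3] < bias[0, 1]      # -log1p(3) < -log1p(1)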
WNConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/4m/c4munax4bhqei64mhriszwqd42q6bjyh4nv3jazxhymu3f7wtucw.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2, 3], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused__weight_norm_interface_0 = async_compile.triton('triton_per_fused__weight_norm_interface_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_4, %mul, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, 
primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface] stream0 = get_raw_stream(0) triton_per_fused__weight_norm_interface_0.run(buf1, primals_2, primals_1, buf2, 4, 64, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf4, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 return (buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data class WNConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
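A short sketch (assuming the default weight_norm over dim=0, as the Conv2d wrapper above uses) of the reparameterisation that triton_per_fused__weight_norm_interface_0 fuses: weight = g * v / ||v||, with one Euclidean norm per output channel.

import torch
from torch import nn

conv = nn.utils.weight_norm(nn.Conv2d(4, 4, 4))
g, v = conv.weight_g, conv.weight_v                       # (4, 1, 1, 1) and (4, 4, 4, 4)
norm = v.pow(2).sum(dim=(1, 2, 3), keepdim=True).sqrt()   # the kernel's sqrt-of-sum-of-squares
assert torch.allclose(g * v / norm, conv.weight, atol=1e-06)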
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2, primals_1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(16)](buf4, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2 class WNConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input_0): primals_3 = self.conv.bias primals_1 = self.conv.weight_g primals_2 = self.conv.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
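A small check (toy shapes only) of the indexing in triton_poi_fused_convolution_1 above: for a contiguous (4, 4, 1, 1) conv output with strides (4, 1, 1, 1), the flat element index is n * 4 + c, so xindex % 4 recovers the channel that selects the bias term.

import torch

out = torch.arange(16.0).view(4, 4, 1, 1)
flat = out.flatten()
for xindex in range(16):
    n, c = divmod(xindex, 4)  # x2 = xindex, x0 = xindex % 4 in the kernel
    assert flat[xindex] == out[n, c, 0, 0]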
Shivanshu-Gupta/KaoKore-VQ-VAE2
WNConv2d
false
1093
[ "MIT" ]
0
38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
https://github.com/Shivanshu-Gupta/KaoKore-VQ-VAE2/tree/38a88ba312dee3c0e2c1aaf02e1c1754ba19ac0c
import torch from torch import nn import torch.utils.data class Model(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, activation=None): super().__init__() self.conv = nn.utils.weight_norm(nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, bias=bias)) self.out_channel = out_channel if isinstance(kernel_size, int): kernel_size = [kernel_size, kernel_size] self.kernel_size = kernel_size self.activation = activation def forward(self, input): out = self.conv(input) if self.activation is not None: out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
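A hedged usage sketch for the Model above (example inputs are mine): with a 4x4 input, a 4x4 kernel, stride 1 and no padding, the spatial output collapses to 1x1.

import torch

m = Model(in_channel=4, out_channel=4, kernel_size=4, activation=torch.relu)
y = m(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 1, 1)  # (4 - 4) / 1 + 1 = 1 in each spatial dim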
DisparityRegression
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/mt/cmtrzhde3xvkbbrbxbhlpwzr6zziv6z7o2vl6xhkqbvvxtyrdmk5.py # Topologically Sorted Source Nodes: [max_d, index, setitem, setitem_1, prob, index_1, setitem_2, setitem_3, prob_1, index_2, setitem_4, setitem_5, prob_2, index_3, setitem_6, setitem_7, prob_3, index_4, setitem_8, setitem_9, prob_4, index_5, setitem_10, setitem_11, prob_5, index_6, setitem_12, setitem_13, prob_6, index_7, setitem_14, setitem_15, prob_7, index_8, setitem_16, setitem_17, prob_8], Original ATen: [aten.argmax, aten.add, aten.lift_fresh, aten.index_put, aten.gather] # Source node to ATen node mapping: # index => add # index_1 => add_1 # index_2 => add_2 # index_3 => add_3 # index_4 => add_4 # index_5 => add_5 # index_6 => add_6 # index_7 => add_7 # index_8 => add_8 # max_d => argmax # prob => gather # prob_1 => gather_1 # prob_2 => gather_2 # prob_3 => gather_3 # prob_4 => gather_4 # prob_5 => gather_5 # prob_6 => gather_6 # prob_7 => gather_7 # prob_8 => gather_8 # setitem => full_default, index_put # setitem_1 => full_default_1, index_put_1 # setitem_10 => full_default_10, index_put_10 # setitem_11 => full_default_11, index_put_11 # setitem_12 => full_default_12, index_put_12 # setitem_13 => full_default_13, index_put_13 # setitem_14 => full_default_14, index_put_14 # setitem_15 => full_default_15, index_put_15 # setitem_16 => full_default_16, index_put_16 # setitem_17 => full_default_17, index_put_17 # setitem_2 => full_default_2, index_put_2 # setitem_3 => full_default_3, index_put_3 # setitem_4 => full_default_4, index_put_4 # setitem_5 => full_default_5, index_put_5 # setitem_6 => full_default_6, index_put_6 # setitem_7 => full_default_7, index_put_7 # setitem_8 => full_default_8, index_put_8 # setitem_9 => full_default_9, index_put_9 # Graph fragment: # %argmax : [num_users=9] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, 1, True), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, -4), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=2] = 
call_function[target=torch.ops.aten.index_put_.default](args = (%add, [%lt], %full_default), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%gt], %full_default_1), kwargs = {}) # %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_1), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, -3), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_2 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_1, [%lt_1], %full_default_2), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_3 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_2, [%gt_1], %full_default_3), kwargs = {}) # %gather_1 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_3), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, -2), kwargs = {}) # %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_4 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_2, [%lt_2], %full_default_4), kwargs = {}) # %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_5 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_4, [%gt_2], %full_default_5), kwargs = {}) # %gather_2 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_5), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, -1), kwargs = {}) # %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_6 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_3, [%lt_3], %full_default_6), kwargs = {}) # %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_7 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_6, [%gt_3], %full_default_7), kwargs = {}) # %gather_3 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_7), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, 0), kwargs = {}) # %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: 
torch.strided, device: cpu, pin_memory: False}) # %index_put_8 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_4, [%lt_4], %full_default_8), kwargs = {}) # %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_9 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_8, [%gt_4], %full_default_9), kwargs = {}) # %gather_4 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_9), kwargs = {}) # %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, 1), kwargs = {}) # %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_10 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_5, [%lt_5], %full_default_10), kwargs = {}) # %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_11 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_10, [%gt_5], %full_default_11), kwargs = {}) # %gather_5 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_11), kwargs = {}) # %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, 2), kwargs = {}) # %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_12 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_6, [%lt_6], %full_default_12), kwargs = {}) # %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_13 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_12, [%gt_6], %full_default_13), kwargs = {}) # %gather_6 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_13), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, 3), kwargs = {}) # %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_14 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_7, [%lt_7], %full_default_14), kwargs = {}) # %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_15 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_14, [%gt_7], %full_default_15), kwargs = {}) # %gather_7 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_15), kwargs = {}) # %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%argmax, 4), kwargs = {}) # %full_default_16 : 
[num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_16 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%add_8, [%lt_8], %full_default_16), kwargs = {}) # %full_default_17 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 3), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put_17 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put_16, [%gt_8], %full_default_17), kwargs = {}) # %gather_8 : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %index_put_17), kwargs = {}) triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i64', 4: '*i64', 5: '*i64', 6: '*i64', 7: '*i64', 8: '*i64', 9: '*i64', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 19), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0(in_ptr0, out_ptr2, out_ptr4, out_ptr6, out_ptr8, out_ptr10, out_ptr12, out_ptr14, out_ptr16, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = 
tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tl.full([1], -4, tl.int64) tmp48 = tmp46 + tmp47 tmp49 = tmp48 < tmp10 tmp50 = tl.where(tmp49, tmp10, tmp48) tmp51 = tmp50 > tmp41 tmp52 = tl.where(tmp51, tmp41, tmp50) tmp53 = tl.full([1], -3, tl.int64) tmp54 = tmp46 + tmp53 tmp55 = tmp54 < tmp10 tmp56 = tl.where(tmp55, tmp10, tmp54) tmp57 = tmp56 > tmp41 tmp58 = tl.where(tmp57, tmp41, tmp56) tmp59 = tl.full([1], -2, tl.int64) tmp60 = tmp46 + tmp59 tmp61 = tmp60 < tmp10 tmp62 = tl.where(tmp61, tmp10, tmp60) tmp63 = tmp62 > tmp41 tmp64 = tl.where(tmp63, tmp41, tmp62) tmp65 = tl.full([1], -1, tl.int64) tmp66 = tmp46 + tmp65 tmp67 = tmp66 < tmp10 tmp68 = tl.where(tmp67, tmp10, tmp66) tmp69 = tmp68 > tmp41 tmp70 = tl.where(tmp69, tmp41, tmp68) tmp71 = tmp46 + tmp10 tmp72 = tmp71 < tmp10 tmp73 = tl.where(tmp72, tmp10, tmp71) tmp74 = tmp73 > tmp41 tmp75 = tl.where(tmp74, tmp41, tmp73) tmp76 = tmp46 + tmp11 tmp77 = tmp76 < tmp10 tmp78 = tl.where(tmp77, tmp10, tmp76) tmp79 = tmp78 > tmp41 tmp80 = tl.where(tmp79, tmp41, tmp78) tmp81 = tmp46 + tmp26 tmp82 = tmp81 < tmp10 tmp83 = tl.where(tmp82, tmp10, tmp81) tmp84 = tmp83 > tmp41 tmp85 = tl.where(tmp84, tmp41, tmp83) tmp86 = tmp46 + tmp41 tmp87 = tmp86 < tmp10 tmp88 = tl.where(tmp87, tmp10, tmp86) tmp89 = tmp88 > tmp41 tmp90 = tl.where(tmp89, tmp41, tmp88) tmp91 = tl.full([1], 4, tl.int64) tmp92 = tmp46 + tmp91 tmp93 = tmp92 < tmp10 tmp94 = tl.where(tmp93, tmp10, tmp92) tmp95 = tmp94 > tmp41 tmp96 = tl.where(tmp95, tmp41, tmp94) tmp97 = tl.full([XBLOCK], 4, tl.int32) tmp98 = tmp52 + tmp97 tmp99 = tmp52 < 0 tmp100 = tl.where(tmp99, tmp98, tmp52) tl.device_assert(((0 <= tmp100) & (tmp100 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp100 < 4") tmp102 = tl.load(in_ptr0 + (x0 + (16*tmp100) + (64*x1)), xmask) tmp103 = tmp58 + tmp97 tmp104 = tmp58 < 0 tmp105 = tl.where(tmp104, tmp103, tmp58) tl.device_assert(((0 <= tmp105) & (tmp105 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp105 < 4") tmp107 = tl.load(in_ptr0 + (x0 + (16*tmp105) + (64*x1)), xmask) tmp108 = tmp64 + tmp97 tmp109 = tmp64 < 0 tmp110 = tl.where(tmp109, tmp108, tmp64) tl.device_assert(((0 <= tmp110) & (tmp110 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp110 < 4") tmp112 = tl.load(in_ptr0 + (x0 + (16*tmp110) + (64*x1)), xmask) tmp113 = tmp70 + tmp97 tmp114 = tmp70 < 0 tmp115 = tl.where(tmp114, tmp113, tmp70) tl.device_assert(((0 <= tmp115) & (tmp115 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp115 < 4") tmp117 = tl.load(in_ptr0 + (x0 + (16*tmp115) + (64*x1)), xmask) tmp118 = tmp75 + tmp97 tmp119 = tmp75 < 0 tmp120 = tl.where(tmp119, tmp118, tmp75) tl.device_assert(((0 <= tmp120) & (tmp120 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp120 < 4") tmp122 = tl.load(in_ptr0 + (x0 + (16*tmp120) + (64*x1)), xmask) tmp123 = tmp80 + tmp97 tmp124 = tmp80 < 0 tmp125 = 
tl.where(tmp124, tmp123, tmp80) tl.device_assert(((0 <= tmp125) & (tmp125 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp125 < 4") tmp127 = tl.load(in_ptr0 + (x0 + (16*tmp125) + (64*x1)), xmask) tmp128 = tmp85 + tmp97 tmp129 = tmp85 < 0 tmp130 = tl.where(tmp129, tmp128, tmp85) tl.device_assert(((0 <= tmp130) & (tmp130 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp130 < 4") tmp132 = tl.load(in_ptr0 + (x0 + (16*tmp130) + (64*x1)), xmask) tmp133 = tmp90 + tmp97 tmp134 = tmp90 < 0 tmp135 = tl.where(tmp134, tmp133, tmp90) tl.device_assert(((0 <= tmp135) & (tmp135 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp135 < 4") tmp137 = tl.load(in_ptr0 + (x0 + (16*tmp135) + (64*x1)), xmask) tmp138 = tmp96 + tmp97 tmp139 = tmp96 < 0 tmp140 = tl.where(tmp139, tmp138, tmp96) tl.device_assert(((0 <= tmp140) & (tmp140 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp140 < 4") tmp142 = tl.load(in_ptr0 + (x0 + (16*tmp140) + (64*x1)), xmask) tl.store(out_ptr2 + (x0 + (144*x1)), tmp52, xmask) tl.store(out_ptr4 + (x0 + (144*x1)), tmp58, xmask) tl.store(out_ptr6 + (x0 + (144*x1)), tmp64, xmask) tl.store(out_ptr8 + (x0 + (144*x1)), tmp70, xmask) tl.store(out_ptr10 + (x0 + (144*x1)), tmp75, xmask) tl.store(out_ptr12 + (x0 + (144*x1)), tmp80, xmask) tl.store(out_ptr14 + (x0 + (144*x1)), tmp85, xmask) tl.store(out_ptr16 + (x0 + (144*x1)), tmp90, xmask) tl.store(out_ptr18 + (x0 + (144*x1)), tmp96, xmask) tl.store(out_ptr19 + (9*x2), tmp102, xmask) tl.store(out_ptr20 + (9*x2), tmp107, xmask) tl.store(out_ptr21 + (9*x2), tmp112, xmask) tl.store(out_ptr22 + (9*x2), tmp117, xmask) tl.store(out_ptr23 + (9*x2), tmp122, xmask) tl.store(out_ptr24 + (9*x2), tmp127, xmask) tl.store(out_ptr25 + (9*x2), tmp132, xmask) tl.store(out_ptr26 + (9*x2), tmp137, xmask) tl.store(out_ptr27 + (9*x2), tmp142, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hc/chcrv4z3xuibekjnrpmxzsnizu3g4ubg6rgohy7cmfzimhemq5jp.py # Topologically Sorted Source Nodes: [sum_1, add_9, part_x_1, part_d, mul, out], Original ATen: [aten.sum, aten.add, aten.div, aten._to_copy, aten.mul] # Source node to ATen node mapping: # add_9 => add_9 # mul => mul # out => sum_2 # part_d => convert_element_type_1 # part_x_1 => div # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%cat, [1], True), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cat, %add_9), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%cat_1, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %convert_element_type_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_per_fused__to_copy_add_div_mul_sum_1 = async_compile.triton('triton_per_fused__to_copy_add_div_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], 
reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_mul_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_div_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 16 x3 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (9*x0)), rmask & xmask, other=0.0) tmp8 = tl.load(in_ptr1 + (x2 + (16*r1) + (144*x3)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1e-08 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask & xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(in_out_ptr0 + (x0), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf30 = empty_strided_cuda((4, 9, 4, 4), (144, 16, 4, 1), torch.int64) buf2 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 0) # alias buf4 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 16) # alias buf6 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 32) # alias buf8 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 48) # alias buf10 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 64) # alias buf12 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 80) # alias buf14 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 96) # alias buf16 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 112) # alias buf18 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 128) # alias buf28 = empty_strided_cuda((4, 9, 4, 4), (144, 1, 36, 9), torch.float32) buf19 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 0) # alias buf20 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 1) # alias buf21 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 2) # alias buf22 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 3) # alias buf23 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 4) # alias buf24 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 5) # alias buf25 = reinterpret_tensor(buf28, (4, 1, 
4, 4), (144, 1, 36, 9), 6) # alias buf26 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 7) # alias buf27 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 8) # alias # Topologically Sorted Source Nodes: [max_d, index, setitem, setitem_1, prob, index_1, setitem_2, setitem_3, prob_1, index_2, setitem_4, setitem_5, prob_2, index_3, setitem_6, setitem_7, prob_3, index_4, setitem_8, setitem_9, prob_4, index_5, setitem_10, setitem_11, prob_5, index_6, setitem_12, setitem_13, prob_6, index_7, setitem_14, setitem_15, prob_7, index_8, setitem_16, setitem_17, prob_8], Original ATen: [aten.argmax, aten.add, aten.lift_fresh, aten.index_put, aten.gather] stream0 = get_raw_stream(0) triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0.run(arg0_1, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf19, buf20, buf21, buf22, buf23, buf24, buf25, buf26, buf27, 64, grid=grid(64), stream=stream0) del arg0_1 buf29 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf31 = reinterpret_tensor(buf29, (4, 4, 4), (16, 4, 1), 0); del buf29 # reuse # Topologically Sorted Source Nodes: [sum_1, add_9, part_x_1, part_d, mul, out], Original ATen: [aten.sum, aten.add, aten.div, aten._to_copy, aten.mul] triton_per_fused__to_copy_add_div_mul_sum_1.run(buf31, buf28, buf30, 64, 9, grid=grid(64), stream=stream0) del buf10 del buf12 del buf14 del buf16 del buf18 del buf19 del buf2 del buf20 del buf21 del buf22 del buf23 del buf24 del buf25 del buf26 del buf27 del buf28 del buf30 del buf4 del buf6 del buf8 return (buf31, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
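An eager sketch (toy data; the kernel above additionally handles NaN comparisons, omitted here since torch.rand never produces NaN) of the running pairwise argmax that the kernel unrolls across the four disparity channels; the strict > keeps the earliest index on ties, matching torch.argmax.

import torch

x = torch.rand(4, 4, 4, 4)
best_val = x[:, 0]
best_idx = torch.zeros(4, 4, 4, dtype=torch.long)
for d in range(1, 4):
    take_new = x[:, d] > best_val
    best_val = torch.where(take_new, x[:, d], best_val)
    best_idx = torch.where(take_new, torch.full_like(best_idx, d), best_idx)
assert torch.equal(best_idx, x.argmax(dim=1))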
import torch import torch.nn as nn import torch.utils.data class DisparityRegression(nn.Module): def __init__(self, maxdisp, win_size): super(DisparityRegression, self).__init__() self.max_disp = maxdisp self.win_size = win_size def forward(self, x): disp = torch.arange(0, self.max_disp).view(1, -1, 1, 1).float() if self.win_size > 0: max_d = torch.argmax(x, dim=1, keepdim=True) d_value = [] prob_value = [] for d in range(-self.win_size, self.win_size + 1): index = max_d + d index[index < 0] = 0 index[index > x.shape[1] - 1] = x.shape[1] - 1 d_value.append(index) prob = torch.gather(x, dim=1, index=index) prob_value.append(prob) part_x = torch.cat(prob_value, dim=1) part_x = part_x / (torch.sum(part_x, dim=1, keepdim=True) + 1e-08) part_d = torch.cat(d_value, dim=1).float() out = torch.sum(part_x * part_d, dim=1) else: out = torch.sum(x * disp, 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'maxdisp': 4, 'win_size': 4}]
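A worked toy example (inputs are mine; assumes the DisparityRegression class above is in scope) of the win_size > 0 branch: gather probabilities in a clamped +/- win_size window around the argmax disparity, renormalise them, and output the expected disparity within the window.

import torch

x = torch.softmax(torch.randn(2, 4, 4, 4), dim=1)  # (N, D, H, W) probability volume
max_d = torch.argmax(x, dim=1, keepdim=True)
idx = torch.cat([(max_d + d).clamp(0, x.shape[1] - 1) for d in range(-4, 5)],
    dim=1)                                          # (N, 9, H, W) clamped window indices
prob = torch.gather(x, dim=1, index=idx)
prob = prob / (torch.sum(prob, dim=1, keepdim=True) + 1e-08)
out = torch.sum(prob * idx.float(), dim=1)          # (N, H, W) disparity map
assert torch.allclose(out, DisparityRegression(maxdisp=4, win_size=4)(x))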
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0(in_ptr0, out_ptr2, out_ptr4, out_ptr6, out_ptr8, out_ptr10, out_ptr12, out_ptr14, out_ptr16, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tl.full([1], -4, tl.int64) tmp48 = tmp46 + tmp47 tmp49 = tmp48 < tmp10 tmp50 = tl.where(tmp49, tmp10, tmp48) tmp51 = tmp50 > tmp41 tmp52 = tl.where(tmp51, tmp41, tmp50) tmp53 = tl.full([1], -3, tl.int64) tmp54 = tmp46 + tmp53 tmp55 = tmp54 < tmp10 tmp56 = tl.where(tmp55, tmp10, tmp54) tmp57 = tmp56 > tmp41 tmp58 = tl.where(tmp57, tmp41, tmp56) tmp59 = tl.full([1], -2, tl.int64) tmp60 = tmp46 + tmp59 tmp61 = tmp60 < tmp10 tmp62 = tl.where(tmp61, tmp10, tmp60) tmp63 = tmp62 > tmp41 tmp64 = tl.where(tmp63, tmp41, tmp62) tmp65 = tl.full([1], -1, tl.int64) tmp66 = tmp46 + tmp65 tmp67 = tmp66 < tmp10 tmp68 = tl.where(tmp67, tmp10, tmp66) tmp69 = tmp68 > tmp41 tmp70 = tl.where(tmp69, tmp41, tmp68) tmp71 = tmp46 + tmp10 tmp72 = tmp71 < tmp10 tmp73 = tl.where(tmp72, tmp10, tmp71) tmp74 = tmp73 > tmp41 tmp75 = tl.where(tmp74, tmp41, tmp73) tmp76 = tmp46 + tmp11 tmp77 = tmp76 < tmp10 tmp78 = tl.where(tmp77, tmp10, tmp76) tmp79 = tmp78 > tmp41 tmp80 = tl.where(tmp79, tmp41, tmp78) tmp81 = tmp46 + tmp26 tmp82 = tmp81 < tmp10 tmp83 = tl.where(tmp82, tmp10, tmp81) tmp84 = tmp83 > tmp41 tmp85 = tl.where(tmp84, tmp41, tmp83) tmp86 = tmp46 + tmp41 tmp87 = tmp86 < tmp10 tmp88 = tl.where(tmp87, tmp10, tmp86) tmp89 = tmp88 > tmp41 tmp90 = tl.where(tmp89, tmp41, tmp88) tmp91 = tl.full([1], 4, tl.int64) tmp92 = tmp46 + tmp91 tmp93 = tmp92 < tmp10 tmp94 = tl.where(tmp93, tmp10, tmp92) 
tmp95 = tmp94 > tmp41 tmp96 = tl.where(tmp95, tmp41, tmp94) tmp97 = tl.full([XBLOCK], 4, tl.int32) tmp98 = tmp52 + tmp97 tmp99 = tmp52 < 0 tmp100 = tl.where(tmp99, tmp98, tmp52) tl.device_assert((0 <= tmp100) & (tmp100 < 4) | ~xmask, 'index out of bounds: 0 <= tmp100 < 4') tmp102 = tl.load(in_ptr0 + (x0 + 16 * tmp100 + 64 * x1), xmask) tmp103 = tmp58 + tmp97 tmp104 = tmp58 < 0 tmp105 = tl.where(tmp104, tmp103, tmp58) tl.device_assert((0 <= tmp105) & (tmp105 < 4) | ~xmask, 'index out of bounds: 0 <= tmp105 < 4') tmp107 = tl.load(in_ptr0 + (x0 + 16 * tmp105 + 64 * x1), xmask) tmp108 = tmp64 + tmp97 tmp109 = tmp64 < 0 tmp110 = tl.where(tmp109, tmp108, tmp64) tl.device_assert((0 <= tmp110) & (tmp110 < 4) | ~xmask, 'index out of bounds: 0 <= tmp110 < 4') tmp112 = tl.load(in_ptr0 + (x0 + 16 * tmp110 + 64 * x1), xmask) tmp113 = tmp70 + tmp97 tmp114 = tmp70 < 0 tmp115 = tl.where(tmp114, tmp113, tmp70) tl.device_assert((0 <= tmp115) & (tmp115 < 4) | ~xmask, 'index out of bounds: 0 <= tmp115 < 4') tmp117 = tl.load(in_ptr0 + (x0 + 16 * tmp115 + 64 * x1), xmask) tmp118 = tmp75 + tmp97 tmp119 = tmp75 < 0 tmp120 = tl.where(tmp119, tmp118, tmp75) tl.device_assert((0 <= tmp120) & (tmp120 < 4) | ~xmask, 'index out of bounds: 0 <= tmp120 < 4') tmp122 = tl.load(in_ptr0 + (x0 + 16 * tmp120 + 64 * x1), xmask) tmp123 = tmp80 + tmp97 tmp124 = tmp80 < 0 tmp125 = tl.where(tmp124, tmp123, tmp80) tl.device_assert((0 <= tmp125) & (tmp125 < 4) | ~xmask, 'index out of bounds: 0 <= tmp125 < 4') tmp127 = tl.load(in_ptr0 + (x0 + 16 * tmp125 + 64 * x1), xmask) tmp128 = tmp85 + tmp97 tmp129 = tmp85 < 0 tmp130 = tl.where(tmp129, tmp128, tmp85) tl.device_assert((0 <= tmp130) & (tmp130 < 4) | ~xmask, 'index out of bounds: 0 <= tmp130 < 4') tmp132 = tl.load(in_ptr0 + (x0 + 16 * tmp130 + 64 * x1), xmask) tmp133 = tmp90 + tmp97 tmp134 = tmp90 < 0 tmp135 = tl.where(tmp134, tmp133, tmp90) tl.device_assert((0 <= tmp135) & (tmp135 < 4) | ~xmask, 'index out of bounds: 0 <= tmp135 < 4') tmp137 = tl.load(in_ptr0 + (x0 + 16 * tmp135 + 64 * x1), xmask) tmp138 = tmp96 + tmp97 tmp139 = tmp96 < 0 tmp140 = tl.where(tmp139, tmp138, tmp96) tl.device_assert((0 <= tmp140) & (tmp140 < 4) | ~xmask, 'index out of bounds: 0 <= tmp140 < 4') tmp142 = tl.load(in_ptr0 + (x0 + 16 * tmp140 + 64 * x1), xmask) tl.store(out_ptr2 + (x0 + 144 * x1), tmp52, xmask) tl.store(out_ptr4 + (x0 + 144 * x1), tmp58, xmask) tl.store(out_ptr6 + (x0 + 144 * x1), tmp64, xmask) tl.store(out_ptr8 + (x0 + 144 * x1), tmp70, xmask) tl.store(out_ptr10 + (x0 + 144 * x1), tmp75, xmask) tl.store(out_ptr12 + (x0 + 144 * x1), tmp80, xmask) tl.store(out_ptr14 + (x0 + 144 * x1), tmp85, xmask) tl.store(out_ptr16 + (x0 + 144 * x1), tmp90, xmask) tl.store(out_ptr18 + (x0 + 144 * x1), tmp96, xmask) tl.store(out_ptr19 + 9 * x2, tmp102, xmask) tl.store(out_ptr20 + 9 * x2, tmp107, xmask) tl.store(out_ptr21 + 9 * x2, tmp112, xmask) tl.store(out_ptr22 + 9 * x2, tmp117, xmask) tl.store(out_ptr23 + 9 * x2, tmp122, xmask) tl.store(out_ptr24 + 9 * x2, tmp127, xmask) tl.store(out_ptr25 + 9 * x2, tmp132, xmask) tl.store(out_ptr26 + 9 * x2, tmp137, xmask) tl.store(out_ptr27 + 9 * x2, tmp142, xmask) @triton.jit def triton_per_fused__to_copy_add_div_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 rnumel = 9 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 16 x3 = xindex // 16 
tmp0 = tl.load(in_ptr0 + (r1 + 9 * x0), rmask & xmask, other=0.0) tmp8 = tl.load(in_ptr1 + (x2 + 16 * r1 + 144 * x3), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1e-08 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(rmask & xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(in_out_ptr0 + x0, tmp14, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf30 = empty_strided_cuda((4, 9, 4, 4), (144, 16, 4, 1), torch.int64) buf2 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 0) buf4 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 16) buf6 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 32) buf8 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 48) buf10 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 64) buf12 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 80) buf14 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 96) buf16 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 112) buf18 = reinterpret_tensor(buf30, (4, 1, 4, 4), (144, 16, 4, 1), 128) buf28 = empty_strided_cuda((4, 9, 4, 4), (144, 1, 36, 9), torch.float32 ) buf19 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 0) buf20 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 1) buf21 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 2) buf22 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 3) buf23 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 4) buf24 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 5) buf25 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 6) buf26 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 7) buf27 = reinterpret_tensor(buf28, (4, 1, 4, 4), (144, 1, 36, 9), 8) get_raw_stream(0) triton_poi_fused_add_argmax_gather_index_put_lift_fresh_0[grid(64)]( arg0_1, buf2, buf4, buf6, buf8, buf10, buf12, buf14, buf16, buf18, buf19, buf20, buf21, buf22, buf23, buf24, buf25, buf26, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf29 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf31 = reinterpret_tensor(buf29, (4, 4, 4), (16, 4, 1), 0) del buf29 triton_per_fused__to_copy_add_div_mul_sum_1[grid(64)](buf31, buf28, buf30, 64, 9, XBLOCK=8, num_warps=2, num_stages=1) del buf10 del buf12 del buf14 del buf16 del buf18 del buf19 del buf2 del buf20 del buf21 del buf22 del buf23 del buf24 del buf25 del buf26 del buf27 del buf28 del buf30 del buf4 del buf6 del buf8 return buf31, class DisparityRegressionNew(nn.Module): def __init__(self, maxdisp, win_size): super(DisparityRegressionNew, self).__init__() self.max_disp = maxdisp self.win_size = win_size def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SpadeLiu/Graft-PSMNet
DisparityRegression
false
1094
[ "MIT" ]
0
1f2950d5afd85237f8d3604caab20dd47a8c9889
https://github.com/SpadeLiu/Graft-PSMNet/tree/1f2950d5afd85237f8d3604caab20dd47a8c9889
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, maxdisp, win_size): super().__init__() self.max_disp = maxdisp self.win_size = win_size def forward(self, x): disp = torch.arange(0, self.max_disp).view(1, -1, 1, 1).float() if self.win_size > 0: max_d = torch.argmax(x, dim=1, keepdim=True) d_value = [] prob_value = [] for d in range(-self.win_size, self.win_size + 1): index = max_d + d index[index < 0] = 0 index[index > x.shape[1] - 1] = x.shape[1] - 1 d_value.append(index) prob = torch.gather(x, dim=1, index=index) prob_value.append(prob) part_x = torch.cat(prob_value, dim=1) part_x = part_x / (torch.sum(part_x, dim=1, keepdim=True) + 1e-08) part_d = torch.cat(d_value, dim=1).float() out = torch.sum(part_x * part_d, dim=1) else: out = torch.sum(x * disp, 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
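A minimal parity sketch for this record (not part of the dataset row): it assumes both class definitions above are in scope, the eager Model from pytorch_code and the compiled DisparityRegressionNew from triton_code, and that a CUDA device is available, since call() pins work to cuda:0. It exercises only the win_size > 0 windowed soft-argmax path that the fused kernel implements; note the eager else branch builds disp on CPU, so it would raise a device-mismatch error for CUDA inputs when win_size == 0.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = Model(4, 4)                     # maxdisp=4, win_size=4, as in get_init_inputs()
    compiled = DisparityRegressionNew(4, 4)
    ref = eager(x)                          # eager windowed soft-argmax over 9 offsets
    out = compiled(x)                       # two fused Triton launches via call()
    assert out.shape == ref.shape == (4, 4, 4)
    # Both paths normalize by (sum + 1e-8) before the weighted sum, so the
    # outputs should agree to within floating-point tolerance.
    assert torch.allclose(out, ref, atol=1e-5)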
Message_Passing_Unit_v1
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/oc/coczp7rkil43vyoaywnm6ib3ifhjmr5ddfinfrw2suxqd75btpch.py # Topologically Sorted Source Nodes: [gate, gate_1], Original ATen: [aten.cat, aten.relu] # Source node to ATen node mapping: # gate => cat # gate_1 => relu # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%cat,), kwargs = {}) triton_poi_fused_cat_relu_0 = async_compile.triton('triton_poi_fused_cat_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = 
tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.full([1], 0, tl.int32) tmp12 = triton_helpers.maximum(tmp11, tmp10) tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/rg/crgu7axt3pkodcuj44cx7l5wvaq2wktx2n3wt4l46yox7manytzy.py # Topologically Sorted Source Nodes: [sigmoid, gate_2], Original ATen: [aten.sigmoid, aten.mean] # Source node to ATen node mapping: # gate_2 => mean # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sigmoid, [1]), kwargs = {}) triton_per_fused_mean_sigmoid_1 = async_compile.triton('triton_per_fused_mean_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_sigmoid_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (128*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/o4/co4bcog5oirylpafkr3bzmpf7tpkw52rybp2isvs75jt6osdlvhc.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul] # Source node to ATen node mapping: # output => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args 
= (%primals_2, %expand), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = 128.0 tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (128, 8), (8, 1)) assert_size_stride(primals_4, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [gate, gate_1], Original ATen: [aten.cat, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_cat_relu_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 128), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, gate_2], Original ATen: [aten.sigmoid, aten.mean] triton_per_fused_mean_sigmoid_1.run(buf1, buf2, 4, 128, grid=grid(4), stream=stream0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_2, buf2, buf3, 16, grid=grid(16), stream=stream0) del buf2 return (buf3, primals_2, buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import 
print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torchvision.transforms import functional as F import torch.utils.data from torch import nn import torch.nn.functional as F class Message_Passing_Unit_v1(nn.Module): def __init__(self, fea_size, filter_size=128): super(Message_Passing_Unit_v1, self).__init__() self.w = nn.Linear(fea_size * 2, filter_size, bias=True) self.fea_size = fea_size self.filter_size = filter_size def forward(self, unary_term, pair_term): if unary_term.size()[0] == 1 and pair_term.size()[0] > 1: unary_term = unary_term.expand(pair_term.size()[0], unary_term. size()[1]) if unary_term.size()[0] > 1 and pair_term.size()[0] == 1: pair_term = pair_term.expand(unary_term.size()[0], pair_term. size()[1]) gate = torch.cat([unary_term, pair_term], 1) gate = F.relu(gate) gate = torch.sigmoid(self.w(gate)).mean(1) output = pair_term * gate.view(-1, 1).expand(gate.size()[0], pair_term.size()[1]) return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'fea_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tmp11 = tl.full([1], 0, tl.int32) tmp12 = triton_helpers.maximum(tmp11, tmp10) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_per_fused_mean_sigmoid_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = 128.0 tmp3 = tmp1 / tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (128, 8), (8, 1)) assert_size_stride(primals_4, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_relu_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 128), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mean_sigmoid_1[grid(4)](buf1, buf2, 4, 128, XBLOCK =1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_2[grid(16)](primals_2, buf2, buf3, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf2 return buf3, primals_2, buf0, buf1 class Message_Passing_Unit_v1New(nn.Module): def __init__(self, fea_size, filter_size=128): super(Message_Passing_Unit_v1New, self).__init__() self.w = nn.Linear(fea_size * 2, filter_size, bias=True) self.fea_size = fea_size self.filter_size = filter_size def forward(self, input_0, input_1): primals_3 = self.w.weight primals_4 = self.w.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
SpartaG117/scene_graph_benchmark
Message_Passing_Unit_v1
false
1095
[ "MIT" ]
0
e2e49940dd2f752b1faf9ae26707435ba3441bcb
https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb
import torch from torchvision.transforms import functional as F import torch.utils.data from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, fea_size, filter_size=128): super().__init__() self.w = nn.Linear(fea_size * 2, filter_size, bias=True) self.fea_size = fea_size self.filter_size = filter_size def forward(self, unary_term, pair_term): if unary_term.size()[0] == 1 and pair_term.size()[0] > 1: unary_term = unary_term.expand(pair_term.size()[0], unary_term. size()[1]) if unary_term.size()[0] > 1 and pair_term.size()[0] == 1: pair_term = pair_term.expand(unary_term.size()[0], pair_term. size()[1]) gate = torch.cat([unary_term, pair_term], 1) gate = F.relu(gate) gate = torch.sigmoid(self.w(gate)).mean(1) output = pair_term * gate.view(-1, 1).expand(gate.size()[0], pair_term.size()[1]) return output def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4]
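A minimal parity sketch under the same assumptions (both class definitions from this record in scope, a CUDA device available for call()): the compiled wrapper only swaps out forward(), while its parameters still live in self.w exactly as in the eager Model, so copying the state_dict makes the two modules directly comparable. The fused kernels reconstruct sigmoid(...).mean(1) as a sum followed by a division by 128.0, which matches the eager gate.

import torch

if torch.cuda.is_available():
    eager = Model(fea_size=4).cuda()
    compiled = Message_Passing_Unit_v1New(fea_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())   # shares w.weight / w.bias values
    u = torch.rand(4, 4, device='cuda')
    p = torch.rand(4, 4, device='cuda')
    # gate = mean(sigmoid(W @ relu(cat(u, p)))) per row; output = p * gate
    assert torch.allclose(eager(u, p), compiled(u, p), atol=1e-5)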
ExpModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/55/c554z2kehw4y75kmgw5gvu2rebcutcxsad4kke4bln6vvbihauog.py # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [exp], Original ATen: [aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ExpModule(nn.Module): def __init__(self): super(ExpModule, self).__init__() def forward(self, x): return torch.exp(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ExpModuleNew(nn.Module): def __init__(self): super(ExpModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SimonTreu/sdvae
ExpModule
false
1096
[ "MIT" ]
0
e0270b9b2acf2d66eec93870f1c5633c8f04d9ab
https://github.com/SimonTreu/sdvae/tree/e0270b9b2acf2d66eec93870f1c5633c8f04d9ab
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return torch.exp(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
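A one-line sanity sketch, assuming ExpModuleNew from this record is in scope and a CUDA device is available: the compiled path is just the single fused exp kernel above, so its output should match torch.exp within allclose tolerance.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    # triton_poi_fused_exp_0 applies tl_math.exp elementwise over all 256 values
    assert torch.allclose(ExpModuleNew()(x), torch.exp(x))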
EncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/fg/cfg742icmosiwp5ugziye26din5ueqx3v7ntptkkpyackudldrxs.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_8, 0), kwargs = {}) triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/g7/cg74ay746yb3ivb67q6pz3e2xibwamviqoisgmyzopmnr47hrosh.py # Topologically Sorted Source 
Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attention_score_1 => div # attention_score_2 => full_default, where # attention_score_3 => amax, exp, sub, sum_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_1 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (4*x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr2 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp15 = tl.load(in_ptr2 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = 
tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr2 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp10 = tmp1 * tmp9 tmp11 = tmp10 * tmp4 tmp12 = tl.where(tmp8, tmp6, tmp11) tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tmp16 * tmp4 tmp18 = tl.where(tmp14, tmp6, tmp17) tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tmp22 * tmp4 tmp24 = tl.where(tmp20, tmp6, tmp23) tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = tl_math.exp(tmp26) tmp28 = tmp12 - tmp25 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp18 - tmp25 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp24 - tmp25 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + (x2), tmp25, xmask) tl.store(out_ptr1 + (x2), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/na/cna5revtaqihb3t6bnp24kdg6eqircycsi7slr6uzmhkwpelq6x7.py # Topologically Sorted Source Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attention_score_1 => div # attention_score_2 => full_default, where # attention_score_3 => div_1, exp, sub # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tc/ctcugqu2nbqlxcf2thnspnnypxifbalbzmclmutd5vaxdes2oyyk.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => var_mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = 
tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ua/cuaquah4oaz43nhi25wixnpzlhvf2zfzdiezhmmwkuy5wfhtw6z4.py # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add => add # layer_norm => add_1, add_2, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_11), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/va/cvayouropyisaprtjrhemadbdvsels72axdjsrgmbayknhu335yc.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/c3/cc3wjckmv52z5p6lagnrhsfwt53rzdfhvzlxkm5tgkwbs3kuzwax.py # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %add_2), kwargs = {}) triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ly/clyngaczex3aq23lq4vvbajq6ig7vbayovzffexoltgvkl4mnmwv.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_poi_fused_native_layer_norm_7 = async_compile.triton('triton_poi_fused_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ev/cevwu4ha52ebsczlu4ai4awtkxr6r5mjdoyil2av5baz2emhshoo.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_17), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_18), kwargs = {}) triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', ''' import triton import triton.language as tl 
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_query], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 
1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_key], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [prj_value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] stream0 = get_raw_stream(0) triton_poi_fused_eq_0.run(primals_8, buf3, 256, grid=grid(256), stream=stream0) del primals_8 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_1.run(buf3, buf0, buf1, buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_score_1, attention_score_2, attention_score_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf3, buf0, buf1, buf4, buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [result], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf7) buf8 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_10 buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_3.run(buf8, primals_1, buf9, buf10, 16, grid=grid(16), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(buf8, primals_1, buf9, buf10, primals_11, primals_12, buf11, 64, grid=grid(64), stream=stream0) del primals_12 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] 
triton_poi_fused_relu_threshold_backward_5.run(buf13, primals_14, buf19, 64, grid=grid(64), stream=stream0) del primals_14 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add] triton_poi_fused_add_6.run(buf15, primals_16, buf11, 64, grid=grid(64), stream=stream0) del primals_16 buf16 = buf9; del buf9 # reuse buf17 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_7.run(buf15, buf16, buf17, 16, grid=grid(16), stream=stream0) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(buf15, buf16, buf17, primals_17, primals_18, buf18, 64, grid=grid(64), stream=stream0) del buf16 del buf17 del primals_18 return (buf18, primals_1, primals_11, primals_17, buf0, buf1, buf3, buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (16, 4), (4, 1), 0), buf15, primals_15, buf19, primals_13, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def __init__(self): super(ScaledDotProductAttention, self).__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super(MultiHeadAttention, self).__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) class FeedForward(nn.Module): def __init__(self, model_dim, hidden_dim, drop_prob): super(FeedForward, self).__init__() self.model_dim = model_dim self.hidden_dim = hidden_dim self.drop_prob = drop_prob self.linearlayer1 = nn.Linear(model_dim, hidden_dim) self.linearlayer2 = nn.Linear(hidden_dim, model_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(drop_prob) def forward(self, tensor): tensor = self.dropout(self.relu(self.linearlayer1(tensor))) return self.linearlayer2(tensor) class EncoderLayer(nn.Module): def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head, drop_prob): super(EncoderLayer, self).__init__() self.attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization1 = nn.LayerNorm(model_dim) self.dropout1 = nn.Dropout(drop_prob) self.ffn = FeedForward(model_dim, hidden_dim, drop_prob) self.normalization2 = nn.LayerNorm(model_dim) self.dropout2 = nn.Dropout(drop_prob) def forward(self, tensor, source_mask): residual = tensor tensor = self.attention(query=tensor, key=tensor, value=tensor, mask=source_mask) tensor = self.dropout1(self.normalization1(tensor + residual)) residual = tensor tensor = self.ffn(tensor) tensor = self.dropout2(self.normalization2(tensor + residual)) return tensor def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])] def 
get_init_inputs(): return [[], {'model_dim': 4, 'key_dim': 4, 'value_dim': 4, 'hidden_dim': 4, 'num_head': 4, 'drop_prob': 0.5}]
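A minimal eager-mode smoke test for the record above (a sketch, not part of the original repo; the helper name is ours, and it assumes the python_code block above has been executed so that EncoderLayer, get_inputs, and get_init_inputs are in scope):

import torch

def smoke_test_encoder_layer():
    # get_init_inputs() returns ([], {'model_dim': 4, ...}); unpack both parts.
    args, kwargs = get_init_inputs()
    layer = EncoderLayer(*args, **kwargs).eval()  # eval() makes the two Dropout layers identity
    tensor, source_mask = get_inputs()  # (4, 4, 4) input and (4, 4, 4, 4) mask
    with torch.no_grad():
        out = layer(tensor, source_mask)
    assert out.shape == tensor.shape  # residual adds and LayerNorm preserve (4, 4, 4)
    return out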
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + 4 * x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr2 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp15 = tl.load(in_ptr2 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp21 = tl.load(in_ptr2 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp10 = tmp1 * tmp9 tmp11 = tmp10 * tmp4 tmp12 = tl.where(tmp8, tmp6, tmp11) tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tmp16 * tmp4 tmp18 = tl.where(tmp14, tmp6, tmp17) tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tmp22 * tmp4 tmp24 = tl.where(tmp20, tmp6, tmp23) tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = tl_math.exp(tmp26) tmp28 = tmp12 - tmp25 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp18 - tmp25 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp24 - tmp25 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x2, tmp25, xmask) tl.store(out_ptr1 + x2, tmp36, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + x3, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 
tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 
* x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_8, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf4 = empty_strided_cuda((4, 4, 
4, 1), (16, 4, 1, 64), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_1[grid(64)](buf3, buf0, buf1, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(256)](buf3, buf0, buf1, buf4, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0), out=buf7) buf8 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0) del buf4 extern_kernels.addmm(primals_10, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8) del primals_10 buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_3[grid(16)](buf8, primals_1, buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(64)](buf8, primals_1, buf9, buf10, primals_11, primals_12, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0) del buf12 buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_5[grid(64)](buf13, primals_14, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0) del buf14 triton_poi_fused_add_6[grid(64)](buf15, primals_16, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_16 buf16 = buf9 del buf9 buf17 = buf10 del buf10 triton_poi_fused_native_layer_norm_7[grid(16)](buf15, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_8[grid(64)](buf15, buf16, buf17, primals_17, primals_18, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf16 del buf17 del primals_18 return (buf18, primals_1, primals_11, primals_17, buf0, buf1, buf3, buf6, reinterpret_tensor(buf7, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor( buf13, (16, 4), (4, 1), 0), buf15, primals_15, buf19, primals_13, primals_9, reinterpret_tensor(buf2, (16, 1, 4), (4, 1, 1), 0)) class ScaledDotProductAttention(nn.Module): def __init__(self): super(ScaledDotProductAttention, self).__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 
1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super(MultiHeadAttention, self).__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) class FeedForward(nn.Module): def __init__(self, model_dim, hidden_dim, drop_prob): super(FeedForward, self).__init__() self.model_dim = model_dim self.hidden_dim = hidden_dim self.drop_prob = drop_prob self.linearlayer1 = nn.Linear(model_dim, hidden_dim) self.linearlayer2 = nn.Linear(hidden_dim, model_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(drop_prob) def forward(self, tensor): tensor = self.dropout(self.relu(self.linearlayer1(tensor))) return self.linearlayer2(tensor) class EncoderLayerNew(nn.Module): def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head, drop_prob): super(EncoderLayerNew, self).__init__() self.attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization1 = nn.LayerNorm(model_dim) self.dropout1 = nn.Dropout(drop_prob) self.ffn = FeedForward(model_dim, hidden_dim, drop_prob) self.normalization2 = nn.LayerNorm(model_dim) self.dropout2 = nn.Dropout(drop_prob) def forward(self, input_0, input_1): primals_2 = self.attention.Wq.weight primals_3 = self.attention.Wq.bias primals_4 = self.attention.Wk.weight primals_5 = self.attention.Wk.bias primals_6 = self.attention.Wv.weight primals_7 = self.attention.Wv.bias primals_9 = self.attention.Wo.weight primals_10 = self.attention.Wo.bias primals_11 = self.normalization1.weight primals_12 = self.normalization1.bias primals_13 = self.ffn.linearlayer1.weight primals_14 = self.ffn.linearlayer1.bias primals_15 = self.ffn.linearlayer2.weight primals_16 = self.ffn.linearlayer2.bias primals_17 = self.normalization2.weight primals_18 = self.normalization2.bias primals_1 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
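A hedged equivalence check between the eager EncoderLayer (python_code above) and the Inductor-generated EncoderLayerNew: call() launches CUDA Triton kernels, so this sketch assumes a CUDA device is available; the tolerances are illustrative and not part of this record.

import torch

def check_encoder_layer_equivalence():
    torch.manual_seed(0)
    ref = EncoderLayer(model_dim=4, key_dim=4, value_dim=4, hidden_dim=4,
                       num_head=4, drop_prob=0.5).cuda().eval()
    new = EncoderLayerNew(4, 4, 4, 4, 4, 0.5).cuda().eval()
    new.load_state_dict(ref.state_dict())  # both modules expose identical parameter names
    x = torch.rand(4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        expected = ref(x, mask)  # dropout is inactive in eval(), matching the traced graph
        actual = new(x, mask)    # runs the fused Triton kernels via call()
    torch.testing.assert_close(actual, expected, rtol=1e-4, atol=1e-4)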
SeungoneKim/Transformer_implementation
EncoderLayer
false
1097
[ "Apache-2.0" ]
0
a52bf552eb645fc9bfb812cc26842fc147d6c008
https://github.com/SeungoneKim/Transformer_implementation/tree/a52bf552eb645fc9bfb812cc26842fc147d6c008
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def __init__(self): super().__init__() def forward(self, query, key, value, mask=None): _1, _2, query_sequence_length, _3 = query.size() batch_size, num_head, key_sequence_length, size_per_head = key.size() query = query.view(batch_size, num_head, query_sequence_length, size_per_head) key = key.view(batch_size, num_head, size_per_head, key_sequence_length ) attention_score = torch.einsum('abcd, abde -> abce', query, key) attention_score = attention_score / math.sqrt(size_per_head) if mask is not None: attention_score = attention_score.masked_fill(mask == 0, - 1000000000.0) attention_score = F.softmax(attention_score, dim=-1) result = attention_score @ value return result, attention_score class MultiHeadAttention(nn.Module): def __init__(self, model_dim, key_dim, value_dim, num_head): super().__init__() self.model_dim = model_dim self.key_dim = key_dim self.value_dim = value_dim self.num_head = num_head self.Wq = nn.Linear(model_dim, key_dim) self.Wk = nn.Linear(model_dim, key_dim) self.Wv = nn.Linear(model_dim, value_dim) self.attention = ScaledDotProductAttention() self.Wo = nn.Linear(value_dim, model_dim) def forward(self, query, key, value, mask=None): prj_query = self.Wq(query) prj_key = self.Wk(key) prj_value = self.Wv(value) multihead_query = self.multihead_split(prj_query) multihead_key = self.multihead_split(prj_key) multihead_value = self.multihead_split(prj_value) attention_output, _attention_score = self.attention(multihead_query, multihead_key, multihead_value, mask=mask) output = self.multihead_concat(attention_output) output = self.Wo(output) return output def multihead_split(self, tensor): batch_size, sequence_length, hidden_size = tensor.size() size_per_head = hidden_size // self.num_head return tensor.view(batch_size, self.num_head, sequence_length, size_per_head) def multihead_concat(self, tensor): batch_size, num_head, sequence_length, size_per_head = tensor.size() hidden_size = num_head * size_per_head return tensor.view(batch_size, sequence_length, hidden_size) class FeedForward(nn.Module): def __init__(self, model_dim, hidden_dim, drop_prob): super().__init__() self.model_dim = model_dim self.hidden_dim = hidden_dim self.drop_prob = drop_prob self.linearlayer1 = nn.Linear(model_dim, hidden_dim) self.linearlayer2 = nn.Linear(hidden_dim, model_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(drop_prob) def forward(self, tensor): tensor = self.dropout(self.relu(self.linearlayer1(tensor))) return self.linearlayer2(tensor) class Model(nn.Module): def __init__(self, model_dim, key_dim, value_dim, hidden_dim, num_head, drop_prob): super().__init__() self.attention = MultiHeadAttention(model_dim, key_dim, value_dim, num_head) self.normalization1 = nn.LayerNorm(model_dim) self.dropout1 = nn.Dropout(drop_prob) self.ffn = FeedForward(model_dim, hidden_dim, drop_prob) self.normalization2 = nn.LayerNorm(model_dim) self.dropout2 = nn.Dropout(drop_prob) def forward(self, tensor, source_mask): residual = tensor tensor = self.attention(query=tensor, key=tensor, value=tensor, mask=source_mask) tensor = self.dropout1(self.normalization1(tensor + residual)) residual = tensor tensor = self.ffn(tensor) tensor = self.dropout2(self.normalization2(tensor + residual)) # ... truncated (>4000 chars) for memory efficiency
Residual_Covolution
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [dow1, dow1_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # dow1 => convolution # dow1_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [12, 12], [12, 12], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [seg], Original ATen: [aten.convolution] # Source node to ATen node mapping: # seg => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [12, 12], [12, 12], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tr/ctr6ssoy3ohhjpgpoxiqv2ojm7yzgqk5hgm4lemyjegkqr4ohtne.py # Topologically Sorted Source Nodes: [inc1, relu_1, add1], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # add1 => add # inc1 => convolution_2 # relu_1 => relu_1 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %relu_1), kwargs = {}) # %le_1 : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [dow1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [dow1, dow1_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) 
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [seg], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [seg], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [inc1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [inc1, relu_1, add1], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_threshold_backward_2.run(buf1, buf4, primals_7, buf5, buf9, 256, grid=grid(256), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [inc2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf4; del buf4 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [inc2, relu_2, out], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward] triton_poi_fused_add_convolution_relu_threshold_backward_2.run(primals_3, buf6, primals_9, buf7, buf8, 256, grid=grid(256), stream=stream0) del buf6 del primals_9 return (buf7, buf3, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Residual_Covolution(nn.Module): def __init__(self, icol, ocol, num_classes): super(Residual_Covolution, self).__init__() self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding =12, dilation=12, bias=True) self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True) self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True) self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding =0, dilation=1, bias=True) self.relu = nn.ReLU(inplace=True) def forward(self, x): dow1 = self.conv1(x) dow1 = self.relu(dow1) seg = self.conv2(dow1) inc1 = self.conv3(seg) add1 = dow1 + self.relu(inc1) inc2 = self.conv4(add1) out = x + self.relu(inc2) return out, seg def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'icol': 4, 'ocol': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tmp0 + tmp5 tmp7 = 0.0 tmp8 = tmp5 <= tmp7 tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(12, 12), dilation=(12, 12), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_2[grid(256)]( buf1, buf4, primals_7, buf5, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf4 del buf4 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_2[grid(256)]( primals_3, buf6, primals_9, buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_9 return (buf7, buf3, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf8, buf9) class Residual_CovolutionNew(nn.Module): def __init__(self, icol, ocol, num_classes): super(Residual_CovolutionNew, self).__init__() self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding =12, dilation=12, bias=True) self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True) self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True) self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding =0, dilation=1, bias=True) self.relu = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
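The same style of hedged equivalence sketch for this record, comparing the eager Residual_Covolution (python_code above) with the generated Residual_CovolutionNew (CUDA device assumed; the helper name and tolerances are ours):

import torch

def check_residual_covolution_equivalence():
    torch.manual_seed(0)
    ref = Residual_Covolution(icol=4, ocol=4, num_classes=4).cuda().eval()
    new = Residual_CovolutionNew(4, 4, 4).cuda().eval()
    new.load_state_dict(ref.state_dict())  # conv1..conv4 line up by name
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        out_ref, seg_ref = ref(x)   # eager path: dilated convs + ReLU residuals
        out_new, seg_new = new(x)   # compiled path: call() returns (out, seg)
    torch.testing.assert_close(out_new, out_ref, rtol=1e-4, atol=1e-4)
    torch.testing.assert_close(seg_new, seg_ref, rtol=1e-4, atol=1e-4)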
SultanAbuGhazal/CGNet
Residual_Covolution
false
1098
[ "MIT" ]
0
f10b976b984ba09be26b902ed4da97cd1311cf17
https://github.com/SultanAbuGhazal/CGNet/tree/f10b976b984ba09be26b902ed4da97cd1311cf17
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, icol, ocol, num_classes): super().__init__() self.conv1 = nn.Conv2d(icol, ocol, kernel_size=3, stride=1, padding =12, dilation=12, bias=True) self.conv2 = nn.Conv2d(ocol, num_classes, kernel_size=3, stride=1, padding=12, dilation=12, bias=True) self.conv3 = nn.Conv2d(num_classes, ocol, kernel_size=1, stride=1, padding=0, dilation=1, bias=True) self.conv4 = nn.Conv2d(ocol, icol, kernel_size=1, stride=1, padding =0, dilation=1, bias=True) self.relu = nn.ReLU(inplace=True) def forward(self, x): dow1 = self.conv1(x) dow1 = self.relu(dow1) seg = self.conv2(dow1) inc1 = self.conv3(seg) add1 = dow1 + self.relu(inc1) inc2 = self.conv4(add1) out = x + self.relu(inc2) return out, seg def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
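A CPU-only smoke test sketch for the Model variant above (the helper name is ours; note that this record's get_init_inputs() returns positional arguments [4, 4, 4], unlike the ([], kwargs) convention used by the python_code field):

import torch

def smoke_test_model():
    model = Model(*get_init_inputs()).eval()  # Model(icol=4, ocol=4, num_classes=4)
    out, seg = model(*get_inputs())           # single (4, 4, 4, 4) input tensor
    # conv2 keeps the spatial size (k=3, padding=12, dilation=12), so seg is also (4, 4, 4, 4)
    assert out.shape == (4, 4, 4, 4) and seg.shape == (4, 4, 4, 4)
    return out, seg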
ResidualBlockNoBN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/l3/cl3kktfjbfxvoqsgvjon5fk5ycnqjp7n2a3dk3gk4gw4n2jfe25m.py # Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => convolution_1 # out_2 => add # out_3 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_3), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf3, primals_5, primals_3, buf4, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class ResidualBlockNoBN(nn.Module):

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlockNoBN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=(3, 3), stride=stride,
            padding=1, bias=True)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
            out_channels=out_channels, kernel_size=(3, 3), stride=1,
            padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(nn.Conv2d(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=(1, 1), stride=stride, bias=False))

    def forward(self, x):
        out = nn.ReLU()(self.conv1(x))
        out = self.conv2(out)
        out += self.shortcut(x)
        out = nn.ReLU()(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)]( buf3, primals_5, primals_3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1, buf4 class ResidualBlockNoBNNew(nn.Module): def __init__(self, in_channels, out_channels, stride=1): super(ResidualBlockNoBNNew, self).__init__() self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=(3, 3), stride=stride, padding=1, bias=True) self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels= out_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True) self.shortcut = nn.Sequential() if stride != 1 or in_channels != out_channels: self.shortcut = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1), stride= stride, bias=False)) def forward(self, 
input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Suvapna/ArtificialLaughter
ResidualBlockNoBN
false
1,100
[ "MIT" ]
0
a7114134b698f829e05e74cac30052e18b260f85
https://github.com/Suvapna/ArtificialLaughter/tree/a7114134b698f829e05e74cac30052e18b260f85
import torch
from torch import nn


class Model(nn.Module):

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
            out_channels=out_channels, kernel_size=(3, 3), stride=stride,
            padding=1, bias=True)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
            out_channels=out_channels, kernel_size=(3, 3), stride=1,
            padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(nn.Conv2d(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=(1, 1), stride=stride, bias=False))

    def forward(self, x):
        out = nn.ReLU()(self.conv1(x))
        out = self.conv2(out)
        out += self.shortcut(x)
        out = nn.ReLU()(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
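The second kernel in this record folds four graph nodes into one pass over the 256 elements: the conv bias add, the residual add of primals_3, the ReLU, and the boolean (output <= 0) mask that aten.threshold_backward later consumes for the ReLU gradient. A minimal eager-mode sketch of the same computation, using only stock PyTorch and the shapes from get_inputs()/get_init_inputs():

import torch
import torch.nn.functional as F

# Shapes follow the record: 4-channel 4x4 feature maps, 3x3 kernels.
x = torch.rand(4, 4, 4, 4)
w1, b1 = torch.rand(4, 4, 3, 3), torch.rand(4)
w2, b2 = torch.rand(4, 4, 3, 3), torch.rand(4)

h = F.relu(F.conv2d(x, w1, b1, padding=1))        # kernel 0: conv bias + relu
out = F.relu(F.conv2d(h, w2, b2, padding=1) + x)  # kernel 1, forward half
mask = out <= 0                                   # kernel 1, saved for backward

print(out.shape, mask.dtype)  # torch.Size([4, 4, 4, 4]) torch.bool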
SpatialAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/46/c46mg7rvdztu6n5oosf5c4if7ziag6obrxhwbn43lcdfibfuom7w.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 2 x0 = xindex % 16 x2 = (xindex // 32) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tmp17 = tl.full([1], 2, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp16, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp15, tmp27) tl.store(out_ptr0 + (x3), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py # Topologically Sorted Source Nodes: [x_1, sigmoid], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): 
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1, sigmoid], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SpatialAttention(nn.Module):

    def __init__(self, kernel_size=7, bias=True):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=bias)
        self.sigmoid = nn.Sigmoid()

    def init_weighs(self):
        normal_init(self.spatial_layer.conv1, std=0.01)

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 + tmp8 tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tl.full([1], 2, tl.int64) tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp21 = triton_helpers.maximum(tmp19, tmp20) tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = triton_helpers.maximum(tmp23, tmp24) tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp16, tmp25, tmp26) tmp28 = tl.where(tmp4, tmp15, tmp27) tl.store(out_ptr0 + x3, tmp28, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_sigmoid_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class SpatialAttentionNew(nn.Module): def __init__(self, kernel_size=7, bias=True): super(SpatialAttentionNew, 
self).__init__() assert kernel_size in (3, 7), 'kernel size must be 3 or 7' padding = 3 if kernel_size == 7 else 1 self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=bias) self.sigmoid = nn.Sigmoid() def init_weighs(self): normal_init(self.spatial_layer.conv1, std=0.01) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
SuzaKrish/mmdetection
SpatialAttention
false
1,101
[ "Apache-2.0" ]
0
31c16891d7493252262e738bcbf05326dba866b2
https://github.com/SuzaKrish/mmdetection/tree/31c16891d7493252262e738bcbf05326dba866b2
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, kernel_size=7, bias=True):
        super().__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=bias)
        self.sigmoid = nn.Sigmoid()

    def init_weighs(self):
        normal_init(self.spatial_layer.conv1, std=0.01)

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
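triton_poi_fused_cat_0 in this record never materialises the mean and max maps separately: each element of the (4, 2, 4, 4) descriptor is computed straight from the four input channels, with output channel 0 holding the mean and channel 1 the max, selected by the x1 < 1 / x1 >= 1 branches. A minimal eager sketch of the same descriptor, using only stock PyTorch:

import torch

# Eager restatement of the fused cat kernel: one (4, 2, 4, 4) buffer
# holding the channel-wise mean (channel 0) and max (channel 1).
x = torch.rand(4, 4, 4, 4)
avg_out = x.mean(dim=1, keepdim=True)
max_out = x.max(dim=1, keepdim=True).values
desc = torch.cat([avg_out, max_out], dim=1)
print(desc.shape)  # torch.Size([4, 2, 4, 4]) -- matches buf0 in call()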
Message_Passing_Unit_v2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/wt/cwtjehqlpgsottzm6ypzegm4s3jeoie6y6l4z5tnv4psexwmszst.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/7e/c7ewtx3sknj4widkgiiygn7cetpykui7eqvnbrietgcfvo46wzhb.py # Topologically Sorted Source Nodes: [gate, sum_1, gate_1], Original ATen: [aten.mul, aten.sum, aten.sigmoid, aten.sigmoid_backward] # Source node to ATen node mapping: # gate => mul # gate_1 => sigmoid # sum_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, %addmm_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sum_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {}) triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1 = async_compile.triton('triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (128*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (128*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.sigmoid(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tmp7 * tmp9 tl.store(out_ptr1 + (x0), tmp10, xmask) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ts/ctsemlty4h3qjpwtckapy4evmw7oic6xfmvqzokqs4ma7l3irspn.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul] # Source node to ATen 
node mapping: # output => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %expand), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (128, 4), (4, 1)) assert_size_stride(primals_4, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(primals_2, buf2, 16, grid=grid(16), stream=stream0) buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 del primals_4 buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) buf6 = empty_strided_cuda((4, ), (1, ), 
torch.float32) # Topologically Sorted Source Nodes: [gate, sum_1, gate_1], Original ATen: [aten.mul, aten.sum, aten.sigmoid, aten.sigmoid_backward] triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1.run(buf1, buf3, buf4, buf6, 4, 128, grid=grid(4), stream=stream0) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_2, buf4, buf5, 16, grid=grid(16), stream=stream0) del buf4 return (buf5, primals_2, buf0, buf1, buf2, buf3, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F


class Message_Passing_Unit_v2(nn.Module):

    def __init__(self, fea_size, filter_size=128):
        super(Message_Passing_Unit_v2, self).__init__()
        self.w = nn.Linear(fea_size, filter_size, bias=True)
        self.fea_size = fea_size
        self.filter_size = filter_size

    def forward(self, unary_term, pair_term):
        if unary_term.size()[0] == 1 and pair_term.size()[0] > 1:
            unary_term = unary_term.expand(pair_term.size()[0],
                unary_term.size()[1])
        if unary_term.size()[0] > 1 and pair_term.size()[0] == 1:
            pair_term = pair_term.expand(unary_term.size()[0],
                pair_term.size()[1])
        gate = self.w(F.relu(unary_term)) * self.w(F.relu(pair_term))
        gate = torch.sigmoid(gate.sum(1))
        output = pair_term * gate.expand(gate.size()[0], pair_term.size()[1])
        return output


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'fea_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 128 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.sigmoid(tmp6) tmp8 = 1.0 tmp9 = tmp8 - tmp7 tmp10 = tmp7 * tmp9 tl.store(out_ptr1 + x0, tmp10, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (128, 4), (4, 1)) assert_size_stride(primals_4, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_relu_0[grid(16)](primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.addmm(primals_4, buf2, reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 del primals_4 buf4 = empty_strided_cuda((4,), (1,), torch.float32) buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sigmoid_sigmoid_backward_sum_1[grid(4)](buf1, buf3, buf4, buf6, 4, 128, XBLOCK=1, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_2[grid(16)](primals_2, buf4, buf5, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf4 
return buf5, primals_2, buf0, buf1, buf2, buf3, buf6 class Message_Passing_Unit_v2New(nn.Module): def __init__(self, fea_size, filter_size=128): super(Message_Passing_Unit_v2New, self).__init__() self.w = nn.Linear(fea_size, filter_size, bias=True) self.fea_size = fea_size self.filter_size = filter_size def forward(self, input_0, input_1): primals_3 = self.w.weight primals_4 = self.w.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
SpartaG117/scene_graph_benchmark
Message_Passing_Unit_v2
false
1,102
[ "MIT" ]
0
e2e49940dd2f752b1faf9ae26707435ba3441bcb
https://github.com/SpartaG117/scene_graph_benchmark/tree/e2e49940dd2f752b1faf9ae26707435ba3441bcb
import torch
from torchvision.transforms import functional as F
import torch.utils.data
from torch import nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, fea_size, filter_size=128):
        super().__init__()
        self.w = nn.Linear(fea_size, filter_size, bias=True)
        self.fea_size = fea_size
        self.filter_size = filter_size

    def forward(self, unary_term, pair_term):
        if unary_term.size()[0] == 1 and pair_term.size()[0] > 1:
            unary_term = unary_term.expand(pair_term.size()[0],
                unary_term.size()[1])
        if unary_term.size()[0] > 1 and pair_term.size()[0] == 1:
            pair_term = pair_term.expand(unary_term.size()[0],
                pair_term.size()[1])
        gate = self.w(F.relu(unary_term)) * self.w(F.relu(pair_term))
        gate = torch.sigmoid(gate.sum(1))
        output = pair_term * gate.expand(gate.size()[0], pair_term.size()[1])
        return output


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4]
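Two details of this record are easy to miss. First, the reduction kernel writes both the per-row gate pre-activation (buf4) and sigmoid(x) * (1 - sigmoid(x)) (buf6), so the sigmoid derivative needed by autograd is precomputed in the forward pass. Second, gate.expand(N, fea) tiles the 1-D gate across rows, so element (i, j) of the output is scaled by gate[j], and triton_poi_fused_mul_2 reproduces exactly that by indexing the gate buffer with xindex % 4 (the column). A minimal eager sketch of the same gating, using only stock PyTorch:

import torch
import torch.nn.functional as F

u, p = torch.rand(4, 4), torch.rand(4, 4)
w = torch.nn.Linear(4, 128)

pre = (w(F.relu(u)) * w(F.relu(p))).sum(dim=1)      # buf4, shape (4,)
ds = torch.sigmoid(pre) * (1 - torch.sigmoid(pre))  # buf6, saved for backward
# expand() broadcasts the (4,) gate along rows, so column j gets gate[j].
output = p * torch.sigmoid(pre).expand(p.size(0), p.size(1))
print(output.shape)  # torch.Size([4, 4])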
PositionalEmbedding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/xc/cxchazmi4sbqmo6kndy2optflcbyzztvdu4wnn2b25nwssqinnbj.py # Topologically Sorted Source Nodes: [signal], Original ATen: [aten.cat] # Source node to ATen node mapping: # signal => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp0.to(tl.float32) tmp6 = -9.210340371976184 tmp7 = tmp5 * tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = x1 tmp10 = 
tmp9.to(tl.float32) tmp11 = tmp10 * tmp8 tmp12 = tl_math.sin(tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 4, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = (-2) + x0 tmp19 = tmp18.to(tl.float32) tmp20 = tmp19 * tmp6 tmp21 = tl_math.exp(tmp20) tmp22 = tmp10 * tmp21 tmp23 = tl_math.cos(tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp15, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp14, tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/li/clijl6e37kmnvjjduuogi6e6ltrhmuxbtlkhmiy5ijbkqmjtltpl.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add_2 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %view), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [signal], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_1.run(arg0_1, buf0, buf1, 64, grid=grid(64), stream=stream0) del arg0_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch


class PositionalEmbedding(torch.nn.Module):

    def __init__(self):
        super(PositionalEmbedding, self).__init__()

    def forward(self, inputs):
        if inputs.dim() != 3:
            raise ValueError('The rank of input must be 3.')
        length = inputs.shape[1]
        channels = inputs.shape[2]
        half_dim = channels // 2
        positions = torch.arange(length, dtype=inputs.dtype,
            device=inputs.device)
        dimensions = torch.arange(half_dim, dtype=inputs.dtype,
            device=inputs.device)
        scale = math.log(10000.0) / float(half_dim - 1)
        dimensions.mul_(-scale).exp_()
        scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
        signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
            dim=1)
        if channels % 2 == 1:
            pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
                device=inputs.device)
            signal = torch.cat([signal, pad], axis=1)
        return inputs + torch.reshape(signal, [1, -1, channels])


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp0.to(tl.float32) tmp6 = -9.210340371976184 tmp7 = tmp5 * tmp6 tmp8 = tl_math.exp(tmp7) tmp9 = x1 tmp10 = tmp9.to(tl.float32) tmp11 = tmp10 * tmp8 tmp12 = tl_math.sin(tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp18 = -2 + x0 tmp19 = tmp18.to(tl.float32) tmp20 = tmp19 * tmp6 tmp21 = tl_math.exp(tmp20) tmp22 = tmp10 * tmp21 tmp23 = tl_math.cos(tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp15, tmp23, tmp24) tmp26 = tl.where(tmp4, tmp14, tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_1[grid(64)](arg0_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf0 return buf1, class PositionalEmbeddingNew(torch.nn.Module): def __init__(self): super(PositionalEmbeddingNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
THUNLP-MT/PLM4MT
PositionalEmbedding
false
1103
[ "BSD-3-Clause" ]
0
85bd2ee9d96b07ac827e14d4b3e5b0d0924c3401
https://github.com/THUNLP-MT/PLM4MT/tree/85bd2ee9d96b07ac827e14d4b3e5b0d0924c3401
import math import torch class Model(torch.nn.Module): def __init__(self): super().__init__() def forward(self, inputs): if inputs.dim() != 3: raise ValueError('The rank of input must be 3.') length = inputs.shape[1] channels = inputs.shape[2] half_dim = channels // 2 positions = torch.arange(length, dtype=inputs.dtype, device=inputs. device) dimensions = torch.arange(half_dim, dtype=inputs.dtype, device= inputs.device) scale = math.log(10000.0) / float(half_dim - 1) dimensions.mul_(-scale).exp_() scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0) signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) if channels % 2 == 1: pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype, device=inputs.device) signal = torch.cat([signal, pad], axis=1) return inputs + torch.reshape(signal, [1, -1, channels]) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return []
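A quick sanity check for a record like the one above is to run the eager module and its Triton-backed rewrite side by side. The sketch below is not part of the original record; it assumes a CUDA device (the generated kernels only run on GPU) and uses the PositionalEmbedding and PositionalEmbeddingNew classes defined in this record.

import torch

# Hedged equivalence check, assuming a CUDA device is available.
# The compiled wrapper hard-asserts the traced (4, 4, 4) shape and strides.
eager = PositionalEmbedding()
compiled = PositionalEmbeddingNew()
x = torch.rand(4, 4, 4, device='cuda')
# tl_math.sin/exp may differ from torch's kernels by a few ulps,
# so compare with tolerances rather than bitwise equality.
torch.testing.assert_close(eager(x), compiled(x))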
MaxPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/2l/c2lm5wvy5varadxpp77k6lvi6yjwzernwi4uqg6gmabg2nygeeur.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = 
triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MaxPool(nn.Module): def __init__(self, dim=1): super(MaxPool, self).__init__() self.dim = dim def forward(self, input): return torch.max(input, self.dim)[0] def __repr__(self): return self.__class__.__name__ + ' (' + 'dim=' + str(self.dim) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class MaxPoolNew(nn.Module): def __init__(self, dim=1): super(MaxPoolNew, self).__init__() self.dim = dim def __repr__(self): return self.__class__.__name__ + ' (' + 'dim=' + str(self.dim) + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SwaggyZhang/Geometry-aware
MaxPool
false
1104
[ "Apache-2.0" ]
0
a750c00aa2f0bda5160dfdeee2eef5230fd9d993
https://github.com/SwaggyZhang/Geometry-aware/tree/a750c00aa2f0bda5160dfdeee2eef5230fd9d993
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim=1): super().__init__() self.dim = dim def forward(self, input): return torch.max(input, self.dim)[0] def __repr__(self): return self.__class__.__name__ + ' (' + 'dim=' + str(self.dim) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
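Note that the compiled call() above is specialized to the traced workload: it asserts a contiguous (4, 4, 4, 4) input and always reduces over dim 1, so MaxPoolNew's dim attribute survives only in __repr__. A minimal check, not part of the original record, assuming a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out = MaxPoolNew(dim=1)(x)
# torch.max over a dim returns (values, indices); the module keeps values.
# The kernel only selects among four loaded values, so equality is exact.
assert torch.equal(out, x.max(dim=1).values)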
Transpose
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/64/c64ahxnpt5ixqrlolbug3qf6y4u2zqmcjekif2yu4ba4hcze2fom.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Transpose(nn.Module): def __init__(self, dim1=0, dim2=1): super(Transpose, self).__init__() self.dim1 = dim1 self.dim2 = dim2 def forward(self, input): return input.transpose(self.dim1, self.dim2).contiguous() def __repr__(self): return self.__class__.__name__ + ' (' + 'between=' + str(self.dim1 ) + ',' + str(self.dim2) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class TransposeNew(nn.Module): def __init__(self, dim1=0, dim2=1): super(TransposeNew, self).__init__() self.dim1 = dim1 self.dim2 = dim2 def __repr__(self): return self.__class__.__name__ + ' (' + 'between=' + str(self.dim1 ) + ',' + str(self.dim2) + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SwaggyZhang/Geometry-aware
Transpose
false
1105
[ "Apache-2.0" ]
0
a750c00aa2f0bda5160dfdeee2eef5230fd9d993
https://github.com/SwaggyZhang/Geometry-aware/tree/a750c00aa2f0bda5160dfdeee2eef5230fd9d993
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim1=0, dim2=1): super().__init__() self.dim1 = dim1 self.dim2 = dim2 def forward(self, input): return input.transpose(self.dim1, self.dim2).contiguous() def __repr__(self): return self.__class__.__name__ + ' (' + 'between=' + str(self.dim1 ) + ',' + str(self.dim2) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
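The fused clone kernel above is a pure gather that swaps the first two axes into a contiguous buffer, i.e. transpose(0, 1).contiguous() specialized to the default dims and the traced (4, 4, 4, 4) shape. A minimal check, not part of the original record, assuming a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out = TransposeNew()(x)
assert out.is_contiguous()
# Pure data movement, so the comparison can be exact.
assert torch.equal(out, x.transpose(0, 1).contiguous())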
GraphConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/rl/crljeqnoa6ykpfmvk4fgvc6cahtaak6ilhiaoxyzgcd7ynbpfnj2.py # Topologically Sorted Source Nodes: [ones], Original ATen: [aten.ones] # Source node to ATen node mapping: # ones => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_ones_0 = async_compile.triton('triton_poi_fused_ones_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ones_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_ones_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dn/cdnkxcepcwle4u3a2zcw72axoqvmyd4bn2kvjp72wctqmnh3vrpp.py # 
Topologically Sorted Source Nodes: [result, result_1], Original ATen: [aten.div, aten.add] # Source node to ATen node mapping: # result => div # result_1 => add # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_6, %view_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %view_8), kwargs = {}) triton_poi_fused_add_div_1 = async_compile.triton('triton_poi_fused_add_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [ones], Original ATen: [aten.ones] stream0 = get_raw_stream(0) triton_poi_fused_ones_0.run(buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [ones, norm], Original ATen: [aten.ones, aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_3 del primals_4 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4) del primals_5 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [result, result_1], Original ATen: [aten.div, aten.add] triton_poi_fused_add_div_1.run(buf5, buf1, buf4, primals_6, 256, grid=grid(256), stream=stream0) del buf4 del primals_6 return (buf5, buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn import torch.autograd def sparse_bmm(sparse_matrix, dense_matrix_batch): """ Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix. Args: sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n) dense_matrix_batch (torch.FloatTensor): Shape = (b, n, p) Returns: (torch.FloatTensor): Result of the batched matrix multiplication. Shape = (b, n, p) """ m = sparse_matrix.shape[0] b, n, p = dense_matrix_batch.shape dense_matrix = dense_matrix_batch.transpose(0, 1).reshape(n, b * p) result = torch.sparse.mm(sparse_matrix, dense_matrix) return result.reshape(m, b, p).transpose(0, 1) class GraphConv(nn.Module): """ A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 This operation with self_layer=False is equivalent to :math:`(A H W)` where: - :math:`H` is the node features with shape (batch_size, num_nodes, input_dim) - :math:`W` is a weight matrix of shape (input_dim, output_dim) - :math:`A` is the adjacency matrix of shape (num_nodes, num_nodes). It can include self-loop. With normalize_adj=True, it is equivalent to :math:`(D^{-1} A H W)`, where: - :math:`D` is a diagonal matrix with :math:`D_{ii}` = the sum of the i-th row of A. In other words, :math:`D` is the incoming degree of each node. With self_layer=True, it is equivalent to the above plus :math:`(H W_{\\text{self}})`, where: - :math:`W_{\\text{self}}` is a separate weight matrix to filter each node's self features. Note that when self_layer is True, A should not include self-loop. Example: >>> node_feat = torch.rand(1, 3, 5) >>> i = torch.LongTensor( ... [[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]]) >>> v = torch.FloatTensor([1, 1, 1, 1, 1, 1]) >>> adj = torch.sparse.FloatTensor(i, v, torch.Size([3, 3])) >>> model = GraphConv(5, 10) >>> output = model(node_feat, adj) >>> # pre-normalize adj >>> adj = normalize_adj(adj) >>> output = model(node_feat, adj, normalize_adj=False) If you use this code, please cite the original paper in addition to Kaolin. .. code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } Args: input_dim (int): The number of features in each input node. output_dim (int): The number of features in each output node. bias (bool): Whether to add bias after the node-wise linear layer. """ def __init__(self, input_dim, output_dim, self_layer=True, bias=True): super(GraphConv, self).__init__() self.self_layer = self_layer self.linear = nn.Linear(input_dim, output_dim, bias=bias) if self_layer: self.linear_self = nn.Linear(input_dim, output_dim, bias=bias) else: self.linear_self = None self.initialize() def initialize(self): nn.init.xavier_uniform_(self.linear.weight.data) if self.linear.bias is not None: self.linear.bias.data.uniform_(-1.0, 1.0) if self.self_layer: nn.init.xavier_uniform_(self.linear_self.weight.data) if self.linear_self.bias is not None: self.linear_self.bias.data.uniform_(-1.0, 1.0) def forward(self, node_feat, adj, normalize_adj=True): """ Args: node_feat (torch.FloatTensor): Shape = (batch_size, num_nodes, input_dim) The input features of each node. adj (torch.sparse.FloatTensor or torch.FloatTensor): Shape = (num_nodes, num_nodes) The adjacency matrix. adj[i, j] is non-zero if there's an incoming edge from j to i. Should not include self-loop if self_layer is True. 
normalize_adj (bool): Set this to true to apply normalization to adjacency; that is, each output feature will be divided by the number of incoming neighbors. If normalization is not desired, or if the adjacency matrix is pre-normalized, set this to False to improve performance. Returns: (torch.FloatTensor): The output features of each node. Shape = (batch_size, num_nodes, output_dim) """ if adj.type().endswith('sparse.FloatTensor'): if normalize_adj: norm = torch.sparse.mm(adj, torch.ones((adj.shape[0], 1), device=node_feat.device)) result = sparse_bmm(adj, self.linear(node_feat)) / norm else: result = sparse_bmm(adj, self.linear(node_feat)) elif normalize_adj: norm = torch.matmul(adj, torch.ones((adj.shape[0], 1), device= node_feat.device)) result = torch.matmul(adj, self.linear(node_feat)) / norm else: result = torch.matmul(adj, self.linear(node_feat)) if self.self_layer: result += self.linear_self(node_feat) return result def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_ones_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_ones_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_3 del primals_4 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3) buf4 = buf2 del buf2 extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf4) del primals_5 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_add_div_1[grid(256)](buf5, buf1, buf4, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_6 return buf5, buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4, 4), (16, 1, 4), 0) def sparse_bmm(sparse_matrix, dense_matrix_batch): """ Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix. Args: sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n) dense_matrix_batch (torch.FloatTensor): Shape = (b, n, p) Returns: (torch.FloatTensor): Result of the batched matrix multiplication. 
Shape = (b, n, p) """ m = sparse_matrix.shape[0] b, n, p = dense_matrix_batch.shape dense_matrix = dense_matrix_batch.transpose(0, 1).reshape(n, b * p) result = torch.sparse.mm(sparse_matrix, dense_matrix) return result.reshape(m, b, p).transpose(0, 1) class GraphConvNew(nn.Module): """ A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 This operation with self_layer=False is equivalent to :math:`(A H W)` where: - :math:`H` is the node features with shape (batch_size, num_nodes, input_dim) - :math:`W` is a weight matrix of shape (input_dim, output_dim) - :math:`A` is the adjacency matrix of shape (num_nodes, num_nodes). It can include self-loop. With normalize_adj=True, it is equivalent to :math:`(D^{-1} A H W)`, where: - :math:`D` is a diagonal matrix with :math:`D_{ii}` = the sum of the i-th row of A. In other words, :math:`D` is the incoming degree of each node. With self_layer=True, it is equivalent to the above plus :math:`(H W_{\\text{self}})`, where: - :math:`W_{\\text{self}}` is a separate weight matrix to filter each node's self features. Note that when self_layer is True, A should not include self-loop. Example: >>> node_feat = torch.rand(1, 3, 5) >>> i = torch.LongTensor( ... [[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]]) >>> v = torch.FloatTensor([1, 1, 1, 1, 1, 1]) >>> adj = torch.sparse.FloatTensor(i, v, torch.Size([3, 3])) >>> model = GraphConv(5, 10) >>> output = model(node_feat, adj) >>> # pre-normalize adj >>> adj = normalize_adj(adj) >>> output = model(node_feat, adj, normalize_adj=False) If you use this code, please cite the original paper in addition to Kaolin. .. code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } Args: input_dim (int): The number of features in each input node. output_dim (int): The number of features in each output node. bias (bool): Whether to add bias after the node-wise linear layer. """ def __init__(self, input_dim, output_dim, self_layer=True, bias=True): super(GraphConvNew, self).__init__() self.self_layer = self_layer self.linear = nn.Linear(input_dim, output_dim, bias=bias) if self_layer: self.linear_self = nn.Linear(input_dim, output_dim, bias=bias) else: self.linear_self = None self.initialize() def initialize(self): nn.init.xavier_uniform_(self.linear.weight.data) if self.linear.bias is not None: self.linear.bias.data.uniform_(-1.0, 1.0) if self.self_layer: nn.init.xavier_uniform_(self.linear_self.weight.data) if self.linear_self.bias is not None: self.linear_self.bias.data.uniform_(-1.0, 1.0) def forward(self, input_0, input_1): primals_3 = self.linear.weight primals_4 = self.linear.bias primals_5 = self.linear_self.weight primals_6 = self.linear_self.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
T0mt0mp/kaolin
GraphConv
false
1106
[ "ECL-2.0", "Apache-2.0" ]
0
57d1e1478eec8df49dc7cc492f25637cec40399f
https://github.com/T0mt0mp/kaolin/tree/57d1e1478eec8df49dc7cc492f25637cec40399f
import torch from torch import nn import torch.nn import torch.autograd def sparse_bmm(sparse_matrix, dense_matrix_batch): """ Perform torch.bmm on an unbatched sparse matrix and a batched dense matrix. Args: sparse_matrix (torch.sparse.FloatTensor): Shape = (m, n) dense_matrix_batch (torch.FloatTensor): Shape = (b, n, p) Returns: (torch.FloatTensor): Result of the batched matrix multiplication. Shape = (b, n, p) """ m = sparse_matrix.shape[0] b, n, p = dense_matrix_batch.shape dense_matrix = dense_matrix_batch.transpose(0, 1).reshape(n, b * p) result = torch.sparse.mm(sparse_matrix, dense_matrix) return result.reshape(m, b, p).transpose(0, 1) class Model(nn.Module): """ A simple graph convolution layer, similar to the one defined in Kipf et al. https://arxiv.org/abs/1609.02907 This operation with self_layer=False is equivalent to :math:`(A H W)` where: - :math:`H` is the node features with shape (batch_size, num_nodes, input_dim) - :math:`W` is a weight matrix of shape (input_dim, output_dim) - :math:`A` is the adjacency matrix of shape (num_nodes, num_nodes). It can include self-loop. With normalize_adj=True, it is equivalent to :math:`(D^{-1} A H W)`, where: - :math:`D` is a diagonal matrix with :math:`D_{ii}` = the sum of the i-th row of A. In other words, :math:`D` is the incoming degree of each node. With self_layer=True, it is equivalent to the above plus :math:`(H W_{\\text{self}})`, where: - :math:`W_{\\text{self}}` is a separate weight matrix to filter each node's self features. Note that when self_layer is True, A should not include self-loop. Example: >>> node_feat = torch.rand(1, 3, 5) >>> i = torch.LongTensor( ... [[0, 1, 1, 2, 2, 0], [1, 0, 2, 1, 0, 2]]) >>> v = torch.FloatTensor([1, 1, 1, 1, 1, 1]) >>> adj = torch.sparse.FloatTensor(i, v, torch.Size([3, 3])) >>> model = GraphConv(5, 10) >>> output = model(node_feat, adj) >>> # pre-normalize adj >>> adj = normalize_adj(adj) >>> output = model(node_feat, adj, normalize_adj=False) If you use this code, please cite the original paper in addition to Kaolin. .. code-block:: @article{kipf2016semi, title={Semi-Supervised Classification with Graph Convolutional Networks}, author={Kipf, Thomas N and Welling, Max}, journal={arXiv preprint arXiv:1609.02907}, year={2016} } Args: input_dim (int): The number of features in each input node. output_dim (int): The number of features in each output node. bias (bool): Whether to add bias after the node-wise linear layer. """ def __init__(self, input_dim, output_dim, self_layer=True, bias=True): super().__init__() self.self_layer = self_layer self.linear = nn.Linear(input_dim, output_dim, bias=bias) if self_layer: self.linear_self = nn.Linear(input_dim, output_dim, bias=bias) else: self.linear_self = None self.initialize() def initialize(self): nn.init.xavier_uniform_(self.linear.weight.data) if self.linear.bias is not None: self.linear.bias.data.uniform_(-1.0, 1.0) if self.self_layer: nn.init.xavier_uniform_(self.linear_self.weight.data) if self.linear_self.bias is not None: self.linear_self.bias.data.uniform_(-1.0, 1.0) def forward(self, node_feat, adj, normalize_adj=True): """ Args: node_feat (torch.FloatTensor): Shape = (batch_size, num_nodes, input_dim) The input features of each node. adj (torch.sparse.FloatTensor or torch.FloatTensor): Shape = (num_nodes, num_nodes) The adjacency matrix. adj[i, j] is non-zero if there's an i # ... truncated (>4000 chars) for memory efficiency
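For the dense-adjacency path with normalize_adj=True, the docstring's formula is D^{-1} A H W + H W_self, where the incoming-degree diagonal D is obtained by multiplying A with a ones vector. The sketch below is not part of the original record; it checks the eager GraphConv defined in this record against that formula on CPU, using a small hypothetical graph.

import torch

conv = GraphConv(input_dim=4, output_dim=4)
node_feat = torch.rand(2, 3, 4)  # (batch, num_nodes, input_dim)
adj = torch.rand(3, 3)           # random dense adjacency, used only for a numeric check
out = conv(node_feat, adj)
# Incoming degree of each node: one matmul with a ones vector.
norm = adj @ torch.ones(3, 1)
ref = (adj @ conv.linear(node_feat)) / norm + conv.linear_self(node_feat)
torch.testing.assert_close(out, ref)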
Align
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/7l/c7lrnzz7u6zsph7qtb6own22vqc34am7brqewva4qc33ddgzxza7.py # Topologically Sorted Source Nodes: [sub, norm, pred], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.neg] # Source node to ATen node mapping: # norm => pow_1, pow_2, sum_1 # pred => neg # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.25), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_2,), kwargs = {}) triton_poi_fused_linalg_vector_norm_neg_sub_0 = async_compile.triton('triton_poi_fused_linalg_vector_norm_neg_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_neg_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_linalg_vector_norm_neg_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp6 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp18 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 0.25 tmp24 = libdevice.pow(tmp22, tmp23) tmp25 = -tmp24 tl.store(out_ptr0 + (x2), tmp25, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, norm, pred], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.neg] stream0 = get_raw_stream(0) triton_poi_fused_linalg_vector_norm_neg_sub_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F class Align(torch.nn.Module): def __init__(self, p): super(Align, self).__init__() self.p = p def forward(self, e1, e2): pred = -torch.norm(e1 - e2, p=self.p, dim=1) return pred def only_pos_loss(self, e1, r, e2): return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'p': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_linalg_vector_norm_neg_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp4 + tmp9 tmp13 = tmp11 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp23 = 0.25 tmp24 = libdevice.pow(tmp22, tmp23) tmp25 = -tmp24 tl.store(out_ptr0 + x2, tmp25, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_linalg_vector_norm_neg_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class AlignNew(torch.nn.Module): def __init__(self, p): super(AlignNew, self).__init__() self.p = p def only_pos_loss(self, e1, r, e2): return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
TMUITLab/EAFR
Align
false
1108
[ "MIT" ]
0
dadb6485d48711ccb8aa2f03760aeb437645f1ff
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
import torch import torch.nn.functional as F class Model(torch.nn.Module): def __init__(self, p): super().__init__() self.p = p def forward(self, e1, e2): pred = -torch.norm(e1 - e2, p=self.p, dim=1) return pred def only_pos_loss(self, e1, r, e2): return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
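The exponent p=4 from get_init_inputs is constant-folded into the kernel above: each squared difference is squared again, the four terms are summed over dim 1, and the sum is raised to 0.25 and negated, so the compiled module only covers the p=4 case. A minimal check, not part of the original record, assuming a CUDA device:

import torch

e1 = torch.rand(4, 4, 4, 4, device='cuda')
e2 = torch.rand(4, 4, 4, 4, device='cuda')
score = AlignNew(p=4)(e1, e2)
# Compare against the eager 4-norm; small floating-point differences are
# possible, so use tolerances instead of exact equality.
torch.testing.assert_close(score, -torch.norm(e1 - e2, p=4, dim=1))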
MNISTGenerator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/hy/chyn4ucj7uhqavrcrhxk2c5izzfdiw63bn3glmpyn3tpx5bpigdc.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mk/cmkwbx5yy6hhpvjwjmlmi7fvwp45sgwxrqj3vqfqnafxubycnrag.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid, aten.sigmoid_backward] # Source node to ATen node mapping: # x_1 => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {}) triton_poi_fused_sigmoid_sigmoid_backward_1 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp3 * tmp5 tl.store(in_out_ptr0 + (x2), tmp3, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (784, 128), (128, 1)) assert_size_stride(primals_5, (784, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # 
Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_2, 512, grid=grid(512), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 784), (784, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (128, 784), (1, 128), 0), out=buf2) buf3 = buf2; del buf2 # reuse buf4 = empty_strided_cuda((4, 784), (784, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid, aten.sigmoid_backward] triton_poi_fused_sigmoid_sigmoid_backward_1.run(buf3, primals_5, buf4, 3136, grid=grid(3136), stream=stream0) del primals_5 return (reinterpret_tensor(buf3, (4, 1, 28, 28), (784, 784, 28, 1), 0), primals_3, buf1, buf4, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((784, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn as nn from torch import optim as optim from torchvision import transforms as transforms class MNISTGenerator(nn.Module): def __init__(self, latent_dim): super(MNISTGenerator, self).__init__() self.image_shape = 1, 28, 28 self.latent_dim = latent_dim self.dense1 = nn.Linear(self.latent_dim, 128, True) self.dense2 = nn.Linear(128, 784, True) def forward(self, x): x = nn.functional.relu(self.dense1(x)) x = nn.functional.sigmoid(self.dense2(x)) return x.view(x.shape[0], *self.image_shape) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'latent_dim': 4}]
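# A minimal smoke-test sketch, assuming the MNISTGenerator class above is in
# scope: a batch of latent vectors should map to (N, 1, 28, 28) images in
# [0, 1], since dense2 emits 784 = 1 * 28 * 28 sigmoid activations.
import torch

gen = MNISTGenerator(latent_dim=4)
z = torch.rand(4, 4)                              # same shape as get_inputs()
imgs = gen(z)
assert imgs.shape == (4, 1, 28, 28)
assert imgs.min() >= 0.0 and imgs.max() <= 1.0    # sigmoid output range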
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn as nn from torch import optim as optim from torchvision import transforms as transforms assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp3 * tmp5 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (784, 128), (128, 1)) assert_size_stride(primals_5, (784,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(512)](buf1, primals_2, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 784), (784, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (128, 784), ( 1, 128), 0), out=buf2) buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 784), (784, 1), torch.float32) triton_poi_fused_sigmoid_sigmoid_backward_1[grid(3136)](buf3, primals_5, buf4, 3136, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 1, 28, 28), (784, 784, 28, 1), 0 ), primals_3, buf1, buf4, primals_4 class MNISTGeneratorNew(nn.Module): def __init__(self, latent_dim): super(MNISTGeneratorNew, self).__init__() self.image_shape = 1, 28, 28 self.latent_dim = latent_dim self.dense1 = nn.Linear(self.latent_dim, 128, True) self.dense2 = nn.Linear(128, 784, True) def forward(self, input_0): primals_1 = self.dense1.weight primals_2 = self.dense1.bias primals_4 = self.dense2.weight primals_5 = self.dense2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
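# A parity sketch, assuming a CUDA device is available and that both
# MNISTGenerator and MNISTGeneratorNew (defined above) are in scope:
# MNISTGeneratorNew routes the same parameters through the compiled call()
# path, so with shared weights the eager and compiled outputs should agree
# up to floating-point noise.
import torch

eager = MNISTGenerator(latent_dim=4).cuda()
compiled = MNISTGeneratorNew(latent_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())      # share dense1/dense2 weights
z = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(z), compiled(z), atol=1e-05)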
RobinMaas95/GTSRB_Visualization
MNISTGenerator
false
1,109
[ "MIT" ]
0
fa837ff94e089a936ef4f4418970d262b35f70b6
https://github.com/RobinMaas95/GTSRB_Visualization/tree/fa837ff94e089a936ef4f4418970d262b35f70b6
import torch from torch import nn as nn from torch import optim as optim from torchvision import transforms as transforms class Model(nn.Module): def __init__(self, latent_dim): super().__init__() self.image_shape = 1, 28, 28 self.latent_dim = latent_dim self.dense1 = nn.Linear(self.latent_dim, 128, True) self.dense2 = nn.Linear(128, 784, True) def forward(self, x): x = nn.functional.relu(self.dense1(x)) x = nn.functional.sigmoid(self.dense2(x)) return x.view(x.shape[0], *self.image_shape) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4]
Conv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/yj/cyjqxrdr34zdlpnaqepj4py4tvwh2ebdslxkfeu7skxqjn4syiak.py # Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # mean_1 => mean_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = 
tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp12 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp18 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp27 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp28 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp30 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + (x2), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nn/cnnlxy27st37ghyasby2wfzvdhe7fuiziq2zan6tepfxf2oe5jld.py # Topologically Sorted Source Nodes: [weight_mean, weight, std, weight_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.div] # Source node to ATen node mapping: # std => sqrt, var # weight => sub # weight_1 => div # weight_mean => mean_2 # Graph fragment: # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [3], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_2), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand), kwargs = {}) triton_per_fused_div_mean_std_sub_1 = async_compile.triton('triton_per_fused_div_mean_std_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_std_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tl.full([XBLOCK, 1], 64, tl.int32) tmp19 = tmp18.to(tl.float32) tmp20 = tmp17 / tmp19 tmp21 = tmp11 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.sum(tmp25, 1)[:, None] tmp27 = 63.0 tmp28 = tmp26 / tmp27 tmp29 = libdevice.sqrt(tmp28) tmp30 = 1e-05 tmp31 = tmp29 + tmp30 tmp32 = tmp10 / tmp31 tl.store(out_ptr0 + (r1 + (64*x0)), tmp10, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp29, xmask) tl.store(out_ptr1 + (r1 + (64*x0)), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) buf5 = buf3; del buf3 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_mean, weight, std, weight_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.div] triton_per_fused_div_mean_std_sub_1.run(buf5, primals_1, buf0, buf1, buf6, 4, 64, grid=grid(4), stream=stream0) del buf0 del buf1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf8, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf8, primals_1, primals_3, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.nn.functional as F class Conv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True ).mean(dim=3, keepdim=True) weight = weight - weight_mean std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1 ) + 1e-05 weight = weight / std.expand_as(weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
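# A sanity-check sketch, assuming the weight-standardized Conv2d above is in
# scope: recompute the standardized weight exactly as forward() does and check
# that each output-channel filter ends up zero-mean with (roughly) unit std.
import torch

conv = Conv2d(in_channels=4, out_channels=4, kernel_size=4)
w = conv.weight
w_mean = w.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3,
    keepdim=True)
w = (w - w_mean) / ((w - w_mean).view(w.size(0), -1).std(dim=1).view(-1, 1,
    1, 1) + 1e-05)
flat = w.view(w.size(0), -1)
assert torch.allclose(flat.mean(dim=1), torch.zeros(4), atol=1e-05)
assert torch.allclose(flat.std(dim=1), torch.ones(4), atol=1e-03)
out = conv(torch.rand(4, 4, 4, 4))    # 4x4 input, 4x4 kernel, no padding
assert out.shape == (4, 4, 1, 1)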
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x2, tmp36, xmask) @triton.jit def triton_per_fused_div_mean_std_sub_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tl.where(xmask, tmp11, 0) tmp14 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tl.full([XBLOCK, 1], 64, tl.int32) tmp19 = tmp18.to(tl.float32) tmp20 = tmp17 / tmp19 tmp21 = tmp11 - tmp20 tmp22 = tmp21 * tmp21 tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.sum(tmp25, 1)[:, None] tmp27 = 63.0 tmp28 = tmp26 / tmp27 tmp29 = libdevice.sqrt(tmp28) tmp30 = 1e-05 tmp31 = tmp29 + tmp30 tmp32 = tmp10 / tmp31 tl.store(out_ptr0 + (r1 + 64 * x0), tmp10, xmask) tl.debug_barrier() 
tl.store(in_out_ptr0 + x0, tmp29, xmask) tl.store(out_ptr1 + (r1 + 64 * x0), tmp32, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf5 = buf3 del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_div_mean_std_sub_1[grid(4)](buf5, primals_1, buf0, buf1, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 buf7 = extern_kernels.convolution(primals_3, buf6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 1, 1), (4, 1, 1, 1)) buf8 = buf7 del buf7 triton_poi_fused_convolution_2[grid(16)](buf8, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf8, primals_1, primals_3, buf5, buf6 class Conv2dNew(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dNew, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
T1anZhenYu/pytorch-classification
Conv2d
false
1,110
[ "MIT" ]
0
ad68e09f20a98541bcb437a7df8e7d14e8c21636
https://github.com/T1anZhenYu/pytorch-classification/tree/ad68e09f20a98541bcb437a7df8e7d14e8c21636
import torch import torch.nn as nn import torch.nn.parallel import torch.nn.functional as F class Model(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True ).mean(dim=3, keepdim=True) weight = weight - weight_mean std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1 ) + 1e-05 weight = weight / std.expand_as(weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
lovasz_hinge
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/qm/cqmiebj3omt27mbkqpnnkw5z77l3s4xgfv6gqogfvmadhw2rd3m7.py # Topologically Sorted Source Nodes: [valid], Original ATen: [aten.ne] # Source node to ATen node mapping: # valid => ne # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%view_1, 255), kwargs = {}) triton_poi_fused_ne_0 = async_compile.triton('triton_poi_fused_ne_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 255.0 tmp2 = tmp0 != tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 
4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, ), (1, ), torch.bool) # Topologically Sorted Source Nodes: [valid], Original ATen: [aten.ne] stream0 = get_raw_stream(0) triton_poi_fused_ne_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(arg0_1, (256, ), (1, ), 0), buf0, reinterpret_tensor(arg1_1, (256, ), (1, ), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import filterfalse as ifilterfalse  # needed by mean(ignore_nan=True)


def flatten_binary_scores(scores, labels, ignore=255):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = labels != ignore
    vscores = scores[valid]
    vlabels = labels[valid]
    return vscores, vlabels


def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts.float() - gt_sorted.float().cumsum(0)
    union = gts.float() + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1.0 - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard


def isnan(x):
    return x != x


def mean(l, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.
    """
    l = iter(l)
    if ignore_nan:
        l = ifilterfalse(isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n


class lovasz_hinge(torch.nn.Module):

    def __init__(self, per_img=False, ignore=255):
        """
        :param weight: 1D weight vector to deal with the class-imbalance
        """
        super().__init__()
        self.per_image = per_img
        self.ignore = ignore

    def lovasz_hinge_flat(self, logits, labels):
        """
        Binary Lovasz hinge loss
        logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
        labels: [P] Tensor, binary ground truth labels (0 or 1)
        ignore: label to ignore
        """
        if len(labels) == 0:
            return logits.sum() * 0.0
        signs = 2.0 * labels.float() - 1.0
        errors = 1.0 - logits * Variable(signs)
        errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
        perm = perm.data
        gt_sorted = labels[perm]
        grad = lovasz_grad(gt_sorted)
        loss = torch.dot(F.relu(errors_sorted), Variable(grad))
        return loss

    def forward(self, logits, labels):
        """
        Binary Lovasz hinge loss
        logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)
        labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
        per_image: compute the loss per image instead of per batch
        ignore: void class id
        """
        if self.per_image:
            loss = mean(self.lovasz_hinge_flat(*flatten_binary_scores(
                log.unsqueeze(0), lab.unsqueeze(0), self.ignore)) for
                log, lab in zip(logits, labels))
        else:
            loss = self.lovasz_hinge_flat(*flatten_binary_scores(logits,
                labels, self.ignore))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
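# A quick end-to-end sketch, assuming the lovasz_hinge module above is in
# scope. Random inputs in [0, 1) never hit the ignore label (255), so all
# 256 flattened pixels survive flatten_binary_scores; the hinge errors and
# the Lovasz gradient increments are both non-negative, so the loss is a
# non-negative scalar.
import torch

criterion = lovasz_hinge(per_img=False, ignore=255)
logits = torch.rand(4, 4, 4, 4)
labels = (torch.rand(4, 4, 4, 4) > 0.5).float()   # binary ground-truth masks
loss = criterion(logits, labels)
assert loss.dim() == 0 and loss.item() >= 0.0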
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import filterfalse as ifilterfalse  # needed by mean(ignore_nan=True)
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_ne_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 255.0
    tmp2 = tmp0 != tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((256,), (1,), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_ne_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return (reinterpret_tensor(arg0_1, (256,), (1,), 0), buf0,
        reinterpret_tensor(arg1_1, (256,), (1,), 0))


def flatten_binary_scores(scores, labels, ignore=255):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = labels != ignore
    vscores = scores[valid]
    vlabels = labels[valid]
    return vscores, vlabels


def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts.float() - gt_sorted.float().cumsum(0)
    union = gts.float() + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1.0 - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard


def isnan(x):
    return x != x


def mean(l, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.
    """
    l = iter(l)
    if ignore_nan:
        l = ifilterfalse(isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n


class lovasz_hingeNew(torch.nn.Module):

    def __init__(self, per_img=False, ignore=255):
        """
        :param weight: 1D weight vector to deal with the class-imbalance
        """
        super().__init__()
        self.per_image = per_img
        self.ignore = ignore

    def lovasz_hinge_flat(self, logits, labels):
        """
        Binary Lovasz hinge loss
        logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
        labels: [P] Tensor, binary ground truth labels (0 or 1)
        ignore: label to ignore
        """
        if len(labels) == 0:
            return logits.sum() * 0.0
        signs = 2.0 * labels.float() - 1.0
        errors = 1.0 - logits * Variable(signs)
        errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
        perm = perm.data
        gt_sorted = labels[perm]
        grad = lovasz_grad(gt_sorted)
        loss = torch.dot(F.relu(errors_sorted), Variable(grad))
        return loss

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
PhillipHuang2017/ext_portrait_segmentation
lovasz_hinge
false
1,111
[ "MIT" ]
0
6d0cec0a953dacbc94a01ea8b719feb687b7c029
https://github.com/PhillipHuang2017/ext_portrait_segmentation/tree/6d0cec0a953dacbc94a01ea8b719feb687b7c029
import torch
import torch.nn.parallel
import torch.utils.data
from torchvision.transforms import functional as F
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import filterfalse as ifilterfalse  # needed by mean(ignore_nan=True)


def flatten_binary_scores(scores, labels, ignore=255):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    valid = labels != ignore
    vscores = scores[valid]
    vlabels = labels[valid]
    return vscores, vlabels


def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts.float() - gt_sorted.float().cumsum(0)
    union = gts.float() + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1.0 - intersection / union
    if p > 1:
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard


def isnan(x):
    return x != x


def mean(l, ignore_nan=False, empty=0):
    """
    nanmean compatible with generators.
    """
    l = iter(l)
    if ignore_nan:
        l = ifilterfalse(isnan, l)
    try:
        n = 1
        acc = next(l)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for n, v in enumerate(l, 2):
        acc += v
    if n == 1:
        return acc
    return acc / n


class Model(torch.nn.Module):

    def __init__(self, per_img=False, ignore=255):
        """
        :param weight: 1D weight vector to deal with the class-imbalance
        """
        super().__init__()
        self.per_image = per_img
        self.ignore = ignore

    def lovasz_hinge_flat(self, logits, labels):
        """
        Binary Lovasz hinge loss
        logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)
        labels: [P] Tensor, binary ground truth labels (0 or 1)
        ignore: label to ignore
        """
        if len(labels) == 0:
            return logits.sum() * 0.0
        signs = 2.0 * labels.float() - 1.0
        errors = 1.0 - logits * Variable(signs)
        errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
        perm = perm.data
        gt_sorted = labels[perm]
        grad = lovasz_grad(gt_sorted)
        loss = torch.dot(F.relu(errors_sorted), Variable(grad))
        return loss

    def forward(self, logits, labels):
        """
        Binary Lovasz hinge loss
        logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)
        labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
        per_image: compute the loss per image instead of per batch
        ignore: void class id
        """
        if self.per_image:
            loss = mean(self.lovasz_hinge_flat(*flatten_binary_scores(
                log.unsqueeze(0), lab.unsqueeze(0), self.ignore)) for
                log, lab in zip(logits, labels))
        else:
            loss = self.lovasz_hinge_flat(*flatten_binary_scores(logits,
                labels, self.ignore))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
AlignEA
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/az/cazasdcnfd4vdvnvqp76gldss6jpfbd546uzzb2vc5bkonvjunrj.py # Topologically Sorted Source Nodes: [add, sub, pow_1, sum_1], Original ATen: [aten.add, aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # add => add # pow_1 => pow_1 # sub => sub # sum_1 => sum_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg2_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) triton_poi_fused_add_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr2 + (x0 + (64*x1)), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr2 + (16 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr2 + (32 + x0 + (64*x1)), xmask) tmp20 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr2 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp5 + tmp11 tmp15 = tmp13 + tmp14 tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp12 + tmp18 tmp22 = tmp20 + tmp21 tmp24 = tmp22 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tmp19 + tmp25 tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, sub, pow_1, sum_1], Original ATen: [aten.add, aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_add_pow_sub_sum_0.run(arg0_1, arg1_1, arg2_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F class AlignEA(torch.nn.Module): def __init__(self, p, feat_drop, params): super(AlignEA, self).__init__() self.params = params def forward(self, e1, r, e2): return torch.sum(torch.pow(e1 + r - e2, 2), 1) def only_pos_loss(self, e1, r, e2): return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum() def loss(self, pos_score, neg_score, target): return F.relu(pos_score - self.params[0]).sum() + self.params[1 ] * F.relu(self.params[2] - neg_score).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'p': 4, 'feat_drop': 4, 'params': 4}]
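# A scoring sketch, assuming the AlignEA class above is in scope. forward()
# computes a TransE-style translational score, sum((e1 + r - e2) ** 2) over
# dim 1, so a perfectly aligned triple with e2 = e1 + r scores exactly zero.
# The params value below is a placeholder: it is only read by loss(), never
# by forward().
import torch

model = AlignEA(p=4, feat_drop=4, params=[1.0, 1.0, 1.0])
e1 = torch.rand(4, 4, 4, 4)
r = torch.rand(4, 4, 4, 4)
score = model(e1, r, e1 + r)          # shape (4, 4, 4), all zeros
assert torch.allclose(score, torch.zeros(4, 4, 4))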
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp14 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp20 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tmp5 + tmp11 tmp15 = tmp13 + tmp14 tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp12 + tmp18 tmp22 = tmp20 + tmp21 tmp24 = tmp22 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tmp19 + tmp25 tl.store(out_ptr0 + x2, tmp26, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class AlignEANew(torch.nn.Module): def __init__(self, p, feat_drop, params): super(AlignEANew, self).__init__() self.params = params def only_pos_loss(self, e1, r, e2): return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum() def loss(self, pos_score, neg_score, target): return F.relu(pos_score - self.params[0]).sum() + self.params[1 ] * F.relu(self.params[2] - neg_score).sum() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
TMUITLab/EAFR
AlignEA
false
1,112
[ "MIT" ]
0
dadb6485d48711ccb8aa2f03760aeb437645f1ff
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
import torch import torch.nn.functional as F class Model(torch.nn.Module): def __init__(self, p, feat_drop, params): super().__init__() self.params = params def forward(self, e1, r, e2): return torch.sum(torch.pow(e1 + r - e2, 2), 1) def only_pos_loss(self, e1, r, e2): return -F.logsigmoid(-torch.sum(torch.pow(e1 + r - e2, 2), 1)).sum() def loss(self, pos_score, neg_score, target): return F.relu(pos_score - self.params[0]).sum() + self.params[1 ] * F.relu(self.params[2] - neg_score).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
fpn_module
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/w5/cw5skfgpv5nrk42leelcp4yltyqwex7mnbyce3n3bj4ebm2z7azu.py # Topologically Sorted Source Nodes: [p5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # p5 => convolution # Graph fragment: # %convolution : [num_users=6] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 
+ (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ib/cib2eq5hg22wfnjfpqps64l2xqhjhgautrnryu7xii6hu5r6ufgg.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # interpolate => convert_element_type_1 # Graph fragment: # %convert_element_type_1 : [num_users=15] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) triton_poi_fused__to_copy_1 = async_compile.triton('triton_poi_fused__to_copy_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wr/cwrn4qfpnl5h5jxhhbi2ktzjxlgmnqsqet77wksszluqa7tvrvn2.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp] # Source node to ATen node mapping: # interpolate => add, clamp_max # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {}) # %clamp_max : [num_users=13] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 63), kwargs = {}) triton_poi_fused_add_clamp_2 = async_compile.triton('triton_poi_fused_add_clamp_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 63, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/rk/crkhx22brl3wt2ooij4yqrdzmhxwkdsxlxgfpso5oebqhikf32hy.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] # Source node to ATen node mapping: # interpolate => clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul, sub # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1.0), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=13] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) triton_poi_fused__to_copy_arange_clamp_mul_sub_3 = async_compile.triton('triton_poi_fused__to_copy_arange_clamp_mul_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_arange_clamp_mul_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = triton_helpers.minimum(tmp9, tmp2) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dn/cdno47sb4iuwlzp3u5bnrx5nfsdaxpfpneo66rlgu43bh4opcxpe.py # Topologically Sorted Source Nodes: [conv2d_1, interpolate, p4], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, mul_2, mul_3, mul_4, sub_1, sub_2, sub_4 # p4 => add_5 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%add_3, %add_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {}) # %add_5 : [num_users=6] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %convolution_1), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_mul_sub_4 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 64 x0 = xindex % 64 x2 = (xindex // 4096) x6 = xindex x3 = (xindex // 4096) % 256 tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_out_ptr0 + (x6), None) tmp33 = tl.load(in_ptr7 + (x3), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 64, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (64*tmp4) + (4096*x2)), None, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + (64*tmp4) + (4096*x2)), None, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = 
tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + (64*tmp22) + (4096*x2)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (tmp13 + (64*tmp22) + (4096*x2)), None, eviction_policy='evict_last') tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp16 tmp27 = tmp23 + tmp26 tmp28 = tmp27 - tmp18 tmp30 = tmp28 * tmp29 tmp31 = tmp18 + tmp30 tmp34 = tmp32 + tmp33 tmp35 = tmp31 + tmp34 tl.store(in_out_ptr0 + (x6), tmp35, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/he/che3l4rv6rafbjk6jebrweq77ki4pey5sowezfrn6tmor5h7vaw6.py # Topologically Sorted Source Nodes: [p5_1, p4_1, p3_1, p5_2, p4_2, p3_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add] # Source node to ATen node mapping: # p3_1 => convolution_9 # p3_2 => _unsafe_index_20, _unsafe_index_21, _unsafe_index_22, _unsafe_index_23, add_30, add_31, mul_27, mul_28, mul_29, sub_26, sub_27, sub_29 # p4_1 => convolution_7 # p4_2 => _unsafe_index_16, _unsafe_index_17, _unsafe_index_18, _unsafe_index_19, add_25, add_26, mul_22, mul_23, mul_24, sub_21, sub_22, sub_24 # p5_1 => convolution_5 # p5_2 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_20, add_21, mul_17, mul_18, mul_19, sub_16, sub_17, sub_19 # Graph fragment: # %convolution_5 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_15, %primals_16, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_7 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_6, %primals_19, %primals_20, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_9 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_23, %primals_24, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_5, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max_2), kwargs = {}) # %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {}) # %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_2), kwargs = {}) # %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {}) # %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_21, %add_20), kwargs = {}) # %mul_19 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %clamp_max_3), kwargs = {}) # %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_7, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_17, %_unsafe_index_16), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %clamp_max_2), kwargs = {}) # %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_16, %mul_22), kwargs = {}) # %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_19, %_unsafe_index_18), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %clamp_max_2), kwargs = {}) # %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_18, %mul_23), kwargs = {}) # %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_26, %add_25), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_3), kwargs = {}) # %_unsafe_index_20 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_21 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index_22 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %_unsafe_index_23 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_9, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_21, %_unsafe_index_20), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %clamp_max_2), kwargs = {}) # %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_20, %mul_27), kwargs = {}) # %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_23, %_unsafe_index_22), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_2), kwargs = {}) # %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_22, %mul_28), kwargs = {}) # %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_31, %add_30), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = 
(%sub_29, %clamp_max_3), kwargs = {}) triton_poi_fused__unsafe_index_add_convolution_mul_sub_5 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_mul_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*i64', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_mul_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 64 x0 = xindex % 64 x5 = (xindex // 4096) x2 = (xindex // 4096) % 128 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr9 + (x2), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr11 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 64, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp12 < 0 tmp15 = tl.where(tmp14, tmp13, tmp12) tmp16 = tl.load(in_ptr2 + (tmp15 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last') tmp17 = tmp16 + tmp10 tmp18 = tmp17 - tmp11 tmp20 = tmp18 * tmp19 tmp21 = tmp11 + tmp20 tmp23 = tmp22 + tmp1 tmp24 
= tmp22 < 0 tmp25 = tl.where(tmp24, tmp23, tmp22) tmp26 = tl.load(in_ptr2 + (tmp8 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last') tmp27 = tmp26 + tmp10 tmp28 = tl.load(in_ptr2 + (tmp15 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last') tmp29 = tmp28 + tmp10 tmp30 = tmp29 - tmp27 tmp31 = tmp30 * tmp19 tmp32 = tmp27 + tmp31 tmp33 = tmp32 - tmp21 tmp35 = tmp33 * tmp34 tmp36 = tl.load(in_ptr8 + (tmp8 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last') tmp38 = tmp36 + tmp37 tmp39 = tl.load(in_ptr8 + (tmp15 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp37 tmp41 = tmp40 - tmp38 tmp42 = tmp41 * tmp19 tmp43 = tmp38 + tmp42 tmp44 = tl.load(in_ptr8 + (tmp8 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last') tmp45 = tmp44 + tmp37 tmp46 = tl.load(in_ptr8 + (tmp15 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last') tmp47 = tmp46 + tmp37 tmp48 = tmp47 - tmp45 tmp49 = tmp48 * tmp19 tmp50 = tmp45 + tmp49 tmp51 = tmp50 - tmp43 tmp52 = tmp51 * tmp34 tmp53 = tl.load(in_ptr10 + (tmp8 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last') tmp55 = tmp53 + tmp54 tmp56 = tl.load(in_ptr10 + (tmp15 + (64*tmp4) + (4096*x5)), None, eviction_policy='evict_last') tmp57 = tmp56 + tmp54 tmp58 = tmp57 - tmp55 tmp59 = tmp58 * tmp19 tmp60 = tmp55 + tmp59 tmp61 = tl.load(in_ptr10 + (tmp8 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last') tmp62 = tmp61 + tmp54 tmp63 = tl.load(in_ptr10 + (tmp15 + (64*tmp25) + (4096*x5)), None, eviction_policy='evict_last') tmp64 = tmp63 + tmp54 tmp65 = tmp64 - tmp62 tmp66 = tmp65 * tmp19 tmp67 = tmp62 + tmp66 tmp68 = tmp67 - tmp60 tmp69 = tmp68 * tmp34 tl.store(out_ptr0 + (x6), tmp21, None) tl.store(out_ptr1 + (x6), tmp35, None) tl.store(out_ptr2 + (x6), tmp43, None) tl.store(out_ptr3 + (x6), tmp52, None) tl.store(out_ptr4 + (x6), tmp60, None) tl.store(out_ptr5 + (x6), tmp69, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/qq/cqqy6dwgchmh6da5wvexvll4djghft4xnm4crpatgerre2lzchix.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_22, %add_27, %add_32, %convolution_11], 1), kwargs = {}) triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8388608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 512 x0 = xindex % 4096 x2 = (xindex // 2097152) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (524288*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + (4096*x1) + (524288*x2)), tmp4, other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 256, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (x0 + (4096*((-128) + x1)) + (524288*x2)), tmp13, other=0.0) tmp15 = tl.load(in_ptr3 + (x0 + (4096*((-128) + x1)) + (524288*x2)), tmp13, other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 384, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + (x0 + (4096*((-256) + x1)) + (524288*x2)), tmp22, other=0.0) tmp24 = tl.load(in_ptr5 + (x0 + (4096*((-256) + x1)) + (524288*x2)), tmp22, other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 512, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tl.load(in_ptr6 + (x0 + (4096*((-384) + x1)) + (524288*x2)), tmp28, other=0.0) tmp32 = tl.load(in_ptr7 + ((-384) + x1), tmp28, eviction_policy='evict_last', other=0.0) tmp33 = tmp31 + tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp28, tmp33, tmp34) tmp36 = tl.where(tmp22, tmp27, tmp35) tmp37 = tl.where(tmp13, tmp18, tmp36) tmp38 = tl.where(tmp4, tmp9, tmp37) tl.store(out_ptr0 + (x3), tmp38, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/aa/caad7kyjzosottzpivtkindhaxpfkjxgqczfry2hxfky2fvdwwvu.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution] # Source node to ATen node mapping: # output => convolution_12 # Graph fragment: # %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_29, %primals_30, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args args.clear() assert_size_stride(primals_1, (256, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 2048, 64, 64), (8388608, 4096, 64, 1)) assert_size_stride(primals_4, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_7, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_8, (256, ), (1, )) assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_10, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_13, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_14, (256, ), (1, )) assert_size_stride(primals_15, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_16, (128, ), (1, )) assert_size_stride(primals_17, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_18, (256, ), (1, )) assert_size_stride(primals_19, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_20, (128, ), (1, )) assert_size_stride(primals_21, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_22, (256, ), (1, )) assert_size_stride(primals_23, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_24, (128, ), (1, )) assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_26, (256, ), (1, )) assert_size_stride(primals_27, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_28, (128, ), (1, )) assert_size_stride(primals_29, (4, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_30, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [p5], Original ATen: [aten.convolution] 
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [p5], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 4194304, grid=grid(4194304), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy] triton_poi_fused__to_copy_1.run(buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((64, 1), (1, 1), torch.int64) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_2.run(buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp] triton_poi_fused__to_copy_1.run(buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.add, aten.clamp] triton_poi_fused_add_clamp_2.run(buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((64, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten.sub] triton_poi_fused__to_copy_arange_clamp_mul_sub_3.run(buf7, 64, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.sub, aten.clamp] triton_poi_fused__to_copy_arange_clamp_mul_sub_3.run(buf9, 64, grid=grid(64), stream=stream0) buf10 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, interpolate, p4], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf10, buf3, buf5, buf1, buf6, buf7, buf4, buf9, primals_5, 4194304, grid=grid(4194304), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [conv2d_2, interpolate_1, p3], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf13, buf3, buf5, buf10, buf6, buf7, buf4, buf9, primals_8, 4194304, grid=grid(4194304), stream=stream0) del primals_8 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(primals_12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 64, 64), 
(1048576, 4096, 64, 1)) buf16 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_3, interpolate_2, p2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_sub_4.run(buf16, buf3, buf5, buf13, buf6, buf7, buf4, buf9, primals_11, 4194304, grid=grid(4194304), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf1, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf18, primals_14, 4194304, grid=grid(4194304), stream=stream0) del primals_14 # Topologically Sorted Source Nodes: [p5_1], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf18, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1)) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf10, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf21, primals_18, 4194304, grid=grid(4194304), stream=stream0) del primals_18 # Topologically Sorted Source Nodes: [p4_1], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 64, 64), (524288, 4096, 64, 1)) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf13, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf24 = buf23; del buf23 # reuse # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf24, primals_22, 4194304, grid=grid(4194304), stream=stream0) del primals_22 # Topologically Sorted Source Nodes: [p3_1], Original ATen: [aten.convolution] buf25 = extern_kernels.convolution(buf24, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1)) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf16, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf27, primals_26, 4194304, grid=grid(4194304), stream=stream0) del primals_26 # Topologically Sorted Source Nodes: [p2_1], 
Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf29 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf30 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf31 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf32 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf33 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf34 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [p5_1, p4_1, p3_1, p5_2, p4_2, p3_2], Original ATen: [aten.convolution, aten._unsafe_index, aten.sub, aten.mul, aten.add] triton_poi_fused__unsafe_index_add_convolution_mul_sub_5.run(buf3, buf5, buf19, primals_16, buf6, buf7, buf4, buf9, buf22, primals_20, buf25, primals_24, buf29, buf30, buf31, buf32, buf33, buf34, 2097152, grid=grid(2097152), stream=stream0) del buf19 del buf22 del buf25 del primals_16 del primals_20 del primals_24 buf35 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_6.run(buf29, buf30, buf31, buf32, buf33, buf34, buf28, primals_28, buf35, 8388608, grid=grid(8388608), stream=stream0) del buf28 del buf29 del buf30 del buf31 del buf32 del buf33 del buf34 del primals_28 # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf37, primals_30, 65536, grid=grid(65536), stream=stream0) del primals_30 return (buf37, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_13, primals_15, primals_17, primals_19, primals_21, primals_23, primals_25, primals_27, primals_29, buf1, buf3, buf4, buf5, buf6, buf7, buf9, buf10, buf13, buf16, buf18, buf21, buf24, buf27, buf35, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 2048, 64, 64), (8388608, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', 
dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
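# Reference sketch (hypothetical helper, not part of the generated module;
# the name and this eager formulation are assumptions): the fused
# _unsafe_index/sub/mul/add kernels above implement bilinear upsampling with
# align_corners=True as gather-plus-lerp. buf3/buf4 hold the integer source
# rows (y0, y1), buf5/buf6 the source columns (x0, x1), and buf7/buf9 the
# fractional lerp weights; the kernel then does two horizontal lerps and one
# vertical lerp, fused with the lateral-conv bias add. The same decomposition
# in plain PyTorch:
import torch


def bilinear_align_corners(x, out_h, out_w):
    # x: (N, C, H, W) float tensor.
    _, _, h, w = x.shape
    # align_corners=True source coordinate: dst_idx * (src - 1) / (dst - 1).
    ys = torch.linspace(0, h - 1, out_h, device=x.device)
    xs = torch.linspace(0, w - 1, out_w, device=x.device)
    y0, x0 = ys.floor().long(), xs.floor().long()
    y1, x1 = (y0 + 1).clamp(max=h - 1), (x0 + 1).clamp(max=w - 1)
    wy = (ys - y0).clamp(0.0, 1.0).view(1, 1, out_h, 1)  # buf9 analogue
    wx = (xs - x0).clamp(0.0, 1.0).view(1, 1, 1, out_w)  # buf7 analogue
    rows0, rows1 = x[:, :, y0, :], x[:, :, y1, :]
    top = rows0[..., x0] + (rows0[..., x1] - rows0[..., x0]) * wx
    bot = rows1[..., x0] + (rows1[..., x1] - rows1[..., x0]) * wx
    return top + (bot - top) * wy


# In this particular graph every feature map is already 64x64, so the
# fractional weights are all zero and the interpolation reduces to a copy;
# the fused kernels still fold in the lateral-conv bias add, which is where
# the actual work happens.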
import torch
import torch.nn.functional as F
import torch.nn as nn


class fpn_module(nn.Module):

    def __init__(self, numClass):
        super(fpn_module, self).__init__()
        self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
        self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
        self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=1, padding=1)

    def _concatenate(self, p5, p4, p3, p2):
        _, _, H, W = p2.size()
        p5 = F.interpolate(p5, size=(H, W), mode='bilinear', align_corners=True)
        p4 = F.interpolate(p4, size=(H, W), mode='bilinear', align_corners=True)
        p3 = F.interpolate(p3, size=(H, W), mode='bilinear', align_corners=True)
        return torch.cat([p5, p4, p3, p2], dim=1)

    def _upsample_add(self, x, y):
        _, _, H, W = y.size()
        return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y

    def forward(self, c2, c3, c4, c5):
        p5 = self.toplayer(c5)
        p4 = self._upsample_add(p5, self.latlayer1(c4))
        p3 = self._upsample_add(p4, self.latlayer2(c3))
        p2 = self._upsample_add(p3, self.latlayer3(c2))
        p5 = self.smooth1_2(self.smooth1_1(p5))
        p4 = self.smooth2_2(self.smooth2_1(p4))
        p3 = self.smooth3_2(self.smooth3_1(p3))
        p2 = self.smooth4_2(self.smooth4_1(p2))
        output = self.classify(self._concatenate(p5, p4, p3, p2))
        return output


def get_inputs():
    return [torch.rand([4, 256, 64, 64]), torch.rand([4, 512, 64, 64]),
        torch.rand([4, 1024, 64, 64]), torch.rand([4, 2048, 64, 64])]


def get_init_inputs():
    return [[], {'numClass': 4}]
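# A minimal smoke test for the eager module above (hypothetical harness,
# not part of the original source; the function name is an assumption):
# builds fpn_module with the arguments from get_init_inputs(), feeds it the
# tensors from get_inputs(), and checks the classifier output shape
# (N, numClass, H, W) implied by the shapes above.
def _smoke_test_fpn_module():
    init_args, init_kwargs = get_init_inputs()
    model = fpn_module(*init_args, **init_kwargs)
    with torch.no_grad():
        output = model(*get_inputs())
    assert output.shape == (4, 4, 64, 64)
    return output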
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tl.full([1], 1, tl.int64) tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 63, tl.int64) tmp10 = triton_helpers.minimum(tmp8, tmp9) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__to_copy_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = 0.0 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp5.to(tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 - tmp7 tmp9 = triton_helpers.maximum(tmp8, tmp4) tmp10 = triton_helpers.minimum(tmp9, tmp2) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x2 = xindex // 4096 x6 = xindex x3 = xindex // 4096 % 256 tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last') tmp29 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp32 = tl.load(in_out_ptr0 + x6, None) tmp33 = tl.load(in_ptr7 + x3, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 64, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x2), None, eviction_policy='evict_last') tmp11 = tmp10 + tmp1 tmp12 = 
tmp10 < 0 tmp13 = tl.where(tmp12, tmp11, tmp10) tmp14 = tl.load(in_ptr2 + (tmp13 + 64 * tmp4 + 4096 * x2), None, eviction_policy='evict_last') tmp15 = tmp14 - tmp9 tmp17 = tmp15 * tmp16 tmp18 = tmp9 + tmp17 tmp20 = tmp19 + tmp1 tmp21 = tmp19 < 0 tmp22 = tl.where(tmp21, tmp20, tmp19) tmp23 = tl.load(in_ptr2 + (tmp8 + 64 * tmp22 + 4096 * x2), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (tmp13 + 64 * tmp22 + 4096 * x2), None, eviction_policy='evict_last') tmp25 = tmp24 - tmp23 tmp26 = tmp25 * tmp16 tmp27 = tmp23 + tmp26 tmp28 = tmp27 - tmp18 tmp30 = tmp28 * tmp29 tmp31 = tmp18 + tmp30 tmp34 = tmp32 + tmp33 tmp35 = tmp31 + tmp34 tl.store(in_out_ptr0 + x6, tmp35, None) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x5 = xindex // 4096 x2 = xindex // 4096 % 128 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp19 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp37 = tl.load(in_ptr9 + x2, None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr11 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 64, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 64 * tmp4 + 4096 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp12 < 0 tmp15 = tl.where(tmp14, tmp13, tmp12) tmp16 = tl.load(in_ptr2 + (tmp15 + 64 * tmp4 + 4096 * x5), None, eviction_policy='evict_last') tmp17 = tmp16 + tmp10 tmp18 = tmp17 - tmp11 tmp20 = tmp18 * tmp19 tmp21 = tmp11 + tmp20 tmp23 = tmp22 + tmp1 tmp24 = tmp22 < 0 tmp25 = tl.where(tmp24, tmp23, tmp22) tmp26 = tl.load(in_ptr2 + (tmp8 + 64 * tmp25 + 4096 * x5), None, eviction_policy='evict_last') tmp27 = tmp26 + tmp10 tmp28 = tl.load(in_ptr2 + (tmp15 + 64 * tmp25 + 4096 * x5), None, eviction_policy='evict_last') tmp29 = tmp28 + tmp10 tmp30 = tmp29 - tmp27 tmp31 = tmp30 * tmp19 tmp32 = tmp27 + tmp31 tmp33 = tmp32 - tmp21 tmp35 = tmp33 * tmp34 tmp36 = tl.load(in_ptr8 + (tmp8 + 64 * tmp4 + 4096 * x5), None, eviction_policy='evict_last') tmp38 = tmp36 + tmp37 tmp39 = tl.load(in_ptr8 + (tmp15 + 64 * tmp4 + 4096 * x5), None, eviction_policy='evict_last') tmp40 = tmp39 + tmp37 tmp41 = tmp40 - tmp38 tmp42 = tmp41 * tmp19 tmp43 = tmp38 + tmp42 tmp44 = tl.load(in_ptr8 + (tmp8 + 64 * tmp25 + 4096 * x5), None, eviction_policy='evict_last') tmp45 = tmp44 + tmp37 tmp46 = tl.load(in_ptr8 + (tmp15 + 64 * tmp25 + 4096 * x5), None, eviction_policy='evict_last') tmp47 = tmp46 + tmp37 tmp48 = tmp47 - tmp45 tmp49 = tmp48 * tmp19 tmp50 = tmp45 + tmp49 tmp51 = tmp50 - tmp43 tmp52 = tmp51 * tmp34 tmp53 = tl.load(in_ptr10 + (tmp8 + 64 * tmp4 + 4096 * x5), None, eviction_policy='evict_last') tmp55 = tmp53 + tmp54 tmp56 = tl.load(in_ptr10 + (tmp15 + 64 * tmp4 + 4096 
* x5), None, eviction_policy='evict_last') tmp57 = tmp56 + tmp54 tmp58 = tmp57 - tmp55 tmp59 = tmp58 * tmp19 tmp60 = tmp55 + tmp59 tmp61 = tl.load(in_ptr10 + (tmp8 + 64 * tmp25 + 4096 * x5), None, eviction_policy='evict_last') tmp62 = tmp61 + tmp54 tmp63 = tl.load(in_ptr10 + (tmp15 + 64 * tmp25 + 4096 * x5), None, eviction_policy='evict_last') tmp64 = tmp63 + tmp54 tmp65 = tmp64 - tmp62 tmp66 = tmp65 * tmp19 tmp67 = tmp62 + tmp66 tmp68 = tmp67 - tmp60 tmp69 = tmp68 * tmp34 tl.store(out_ptr0 + x6, tmp21, None) tl.store(out_ptr1 + x6, tmp35, None) tl.store(out_ptr2 + x6, tmp43, None) tl.store(out_ptr3 + x6, tmp52, None) tl.store(out_ptr4 + x6, tmp60, None) tl.store(out_ptr5 + x6, tmp69, None) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 512 x0 = xindex % 4096 x2 = xindex // 2097152 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 524288 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 524288 * x2), tmp4, other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 256, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (x0 + 4096 * (-128 + x1) + 524288 * x2), tmp13, other=0.0) tmp15 = tl.load(in_ptr3 + (x0 + 4096 * (-128 + x1) + 524288 * x2), tmp13, other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 384, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + (x0 + 4096 * (-256 + x1) + 524288 * x2), tmp22, other=0.0) tmp24 = tl.load(in_ptr5 + (x0 + 4096 * (-256 + x1) + 524288 * x2), tmp22, other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tl.full([1], 512, tl.int64) tmp31 = tl.load(in_ptr6 + (x0 + 4096 * (-384 + x1) + 524288 * x2), tmp28, other=0.0) tmp32 = tl.load(in_ptr7 + (-384 + x1), tmp28, eviction_policy= 'evict_last', other=0.0) tmp33 = tmp31 + tmp32 tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp28, tmp33, tmp34) tmp36 = tl.where(tmp22, tmp27, tmp35) tmp37 = tl.where(tmp13, tmp18, tmp36) tmp38 = tl.where(tmp4, tmp9, tmp37) tl.store(out_ptr0 + x3, tmp38, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30) = args args.clear() assert_size_stride(primals_1, (256, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, 
(4, 2048, 64, 64), (8388608, 4096, 64, 1)) assert_size_stride(primals_4, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 1024, 64, 64), (4194304, 4096, 64, 1)) assert_size_stride(primals_7, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_8, (256,), (1,)) assert_size_stride(primals_9, (4, 512, 64, 64), (2097152, 4096, 64, 1)) assert_size_stride(primals_10, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (4, 256, 64, 64), (1048576, 4096, 64, 1)) assert_size_stride(primals_13, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_14, (256,), (1,)) assert_size_stride(primals_15, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_18, (256,), (1,)) assert_size_stride(primals_19, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_20, (128,), (1,)) assert_size_stride(primals_21, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_22, (256,), (1,)) assert_size_stride(primals_23, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_24, (128,), (1,)) assert_size_stride(primals_25, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_26, (256,), (1,)) assert_size_stride(primals_27, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_28, (128,), (1,)) assert_size_stride(primals_29, (4, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_30, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(4194304)](buf1, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(64)](buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(64)](buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(64)](buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_3[grid(64)](buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_arange_clamp_mul_sub_3[grid(64)](buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = buf2 del buf2 triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304) ](buf10, buf3, buf5, buf1, buf6, buf7, buf4, buf9, primals_5, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf11 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf11, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf13 = buf11 del buf11 triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304) ](buf13, buf3, buf5, buf10, buf6, buf7, buf4, buf9, primals_8, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_8 buf14 = extern_kernels.convolution(primals_12, primals_10, stride=( 1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf16 = buf14 del buf14 triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(4194304) ](buf16, buf3, buf5, buf13, buf6, buf7, buf4, buf9, primals_11, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf17 = extern_kernels.convolution(buf1, primals_13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_0[grid(4194304)](buf18, primals_14, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_14 buf19 = extern_kernels.convolution(buf18, primals_15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf20 = extern_kernels.convolution(buf10, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_0[grid(4194304)](buf21, primals_18, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_18 buf22 = extern_kernels.convolution(buf21, primals_19, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf23 = extern_kernels.convolution(buf13, primals_21, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf24 = buf23 del buf23 triton_poi_fused_convolution_0[grid(4194304)](buf24, primals_22, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_22 buf25 = extern_kernels.convolution(buf24, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf26 = extern_kernels.convolution(buf16, primals_25, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 256, 64, 64), (1048576, 4096, 64, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_0[grid(4194304)](buf27, primals_26, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_26 buf28 = extern_kernels.convolution(buf27, primals_27, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf29 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf30 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf31 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf32 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf33 = 
empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf34 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) triton_poi_fused__unsafe_index_add_convolution_mul_sub_5[grid(2097152) ](buf3, buf5, buf19, primals_16, buf6, buf7, buf4, buf9, buf22, primals_20, buf25, primals_24, buf29, buf30, buf31, buf32, buf33, buf34, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del buf19 del buf22 del buf25 del primals_16 del primals_20 del primals_24 buf35 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) triton_poi_fused_cat_6[grid(8388608)](buf29, buf30, buf31, buf32, buf33, buf34, buf28, primals_28, buf35, 8388608, XBLOCK=1024, num_warps=4, num_stages=1) del buf28 del buf29 del buf30 del buf31 del buf32 del buf33 del buf34 del primals_28 buf36 = extern_kernels.convolution(buf35, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_7[grid(65536)](buf37, primals_30, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_30 return (buf37, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_10, primals_12, primals_13, primals_15, primals_17, primals_19, primals_21, primals_23, primals_25, primals_27, primals_29, buf1, buf3, buf4, buf5, buf6, buf7, buf9, buf10, buf13, buf16, buf18, buf21, buf24, buf27, buf35) class fpn_moduleNew(nn.Module): def __init__(self, numClass): super(fpn_moduleNew, self).__init__() self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0 ) self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1 ) self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1 ) self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1 ) self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1 ) self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1 ) self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1 ) self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1 ) self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1 ) self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0) self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0 ) self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0 ) self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride= 1, padding=1) def _concatenate(self, p5, p4, p3, p2): _, _, H, W = p2.size() p5 = F.interpolate(p5, size=(H, W), mode='bilinear', align_corners=True ) p4 = F.interpolate(p4, size=(H, W), mode='bilinear', align_corners=True ) p3 = F.interpolate(p3, size=(H, W), mode='bilinear', align_corners=True ) return torch.cat([p5, p4, p3, p2], dim=1) def _upsample_add(self, x, y): _, _, H, W = y.size() return F.interpolate(x, size=(H, W), mode='bilinear', align_corners =True) + y def forward(self, input_0, input_1, input_2, input_3): primals_1 = self.toplayer.weight primals_2 = self.toplayer.bias primals_13 = self.smooth1_1.weight primals_5 = self.smooth1_1.bias primals_17 = self.smooth2_1.weight primals_8 = self.smooth2_1.bias primals_21 = self.smooth3_1.weight primals_11 = self.smooth3_1.bias primals_25 = self.smooth4_1.weight primals_14 = self.smooth4_1.bias primals_15 = self.smooth1_2.weight primals_16 = self.smooth1_2.bias primals_19 = self.smooth2_2.weight primals_20 = self.smooth2_2.bias 
primals_23 = self.smooth3_2.weight primals_24 = self.smooth3_2.bias primals_27 = self.smooth4_2.weight primals_28 = self.smooth4_2.bias primals_4 = self.latlayer1.weight primals_18 = self.latlayer1.bias primals_7 = self.latlayer2.weight primals_22 = self.latlayer2.bias primals_10 = self.latlayer3.weight primals_26 = self.latlayer3.bias primals_29 = self.classify.weight primals_30 = self.classify.bias primals_12 = input_0 primals_9 = input_1 primals_6 = input_2 primals_3 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30]) return output[0]
LOUEY233/CPS3320_python
fpn_module
false
1,113
[ "MIT" ]
0
3cc1733d91c3a8f680eeb984348e2a52ae3285ec
https://github.com/LOUEY233/CPS3320_python/tree/3cc1733d91c3a8f680eeb984348e2a52ae3285ec
import torch
import torch.nn.functional as F
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, numClass):
        super().__init__()
        self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0)
        self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
        self.classify = nn.Conv2d(128 * 4, numClass, kernel_size=3, stride=1,
            padding=1)

    def _concatenate(self, p5, p4, p3, p2):
        _, _, H, W = p2.size()
        p5 = F.interpolate(p5, size=(H, W), mode='bilinear', align_corners=True)
        p4 = F.interpolate(p4, size=(H, W), mode='bilinear', align_corners=True)
        p3 = F.interpolate(p3, size=(H, W), mode='bilinear', align_corners=True)
        return torch.cat([p5, p4, p3, p2], dim=1)

    def _upsample_add(self, x, y):
        _, _, H, W = y.size()
        return F.interpolate(x, size=(H, W), mode='bilinear',
            align_corners=True) + y

    def forward(self, c2, c3, c4, c5):
        p5 = self.toplayer(c5)
        p4 = self._upsample_add(p5, self.latlayer1(c4))
        p3 = self._upsample_add(p4, self.latlayer2(c3))
        p2 = self._upsample_add(p3, self.latlayer3(c2))
        p5 = self.smooth1_2(self.smooth1_1(p5))
        p4 = self.smooth2_2(self.smooth2_1(p4))
        p3 = self.smooth3_2(self.smooth3_1(p3))
        p2 = self.smooth4_2(self.smooth4_1(p2))
        output = self.classify(self._concatenate(p5, p4, p3, p2))
        return output


def get_inputs():
    return [torch.rand([4, 256, 64, 64]), torch.rand([4, 512, 64, 64]),
        torch.rand([4, 1024, 64, 64]), torch.rand([4, 2048, 64, 64])]


def get_init_inputs():
    return [4]
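As a quick sanity check on the FPN record above, here is a minimal eager-mode sketch (assuming only stock PyTorch; the function and variable names are illustrative, not from the record) of the `_upsample_add` top-down merge that the fused `_unsafe_index_add_convolution_mul_sub` Triton kernels implement:

import torch
import torch.nn.functional as F

def upsample_add(x, y):
    # Resize the coarser map x to y's spatial size with align_corners=True
    # bilinear interpolation, then add elementwise -- the FPN top-down merge.
    _, _, H, W = y.size()
    return F.interpolate(x, size=(H, W), mode='bilinear',
        align_corners=True) + y

if __name__ == '__main__':
    x = torch.rand(2, 256, 16, 16)  # coarse level
    y = torch.rand(2, 256, 64, 64)  # finer lateral feature
    out = upsample_add(x, y)
    assert out.shape == y.shape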
Bilinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/55/c556jlky4dn3zysohrmkzzgx6ib2mru2umit7utahhvakbnltdhb.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = (xindex // 256) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/c5/cc5niyk56fa3m2oc45eqgnf3eeposiu56eefgo6q2ozqlq3nwxnc.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_3, buf2, 1024, grid=grid(1024), stream=stream0) del primals_3 buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), 
(16, 4, 1), 0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3) del buf1 return (reinterpret_tensor(buf3, (1024, 1), (1, 1), 0), reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Bilinear(nn.Module):

    def __init__(self, size):
        super(Bilinear, self).__init__()
        self.size = size
        self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
        self.reset_parameters()

    def reset_parameters(self):
        params = [p for p in self.parameters() if p.requires_grad]
        for i, param in enumerate(params):
            param.data.normal_()

    def forward(self, vector1, vector2):
        bma = torch.matmul(vector1, self.mat).unsqueeze(1)
        ba = torch.matmul(bma, vector2.unsqueeze(2)).view(-1, 1)
        return ba


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex // 256 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(1024)](primals_3, buf2, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), out=buf3) del buf1 return reinterpret_tensor(buf3, (1024, 1), (1, 1), 0), reinterpret_tensor( buf2, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class BilinearNew(nn.Module): def __init__(self, size): super(BilinearNew, self).__init__() self.size = size self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size)) self.reset_parameters() def reset_parameters(self): params = [p for p in self.parameters() if p.requires_grad] for i, param in enumerate(params): param.data.normal_() def forward(self, input_0, input_1): primals_1 = self.mat primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
TRUMANCFY/VL-DIORA
Bilinear
false
1,114
[ "Apache-2.0" ]
0
cef398e05842d4a30345260d8e27d1c362671834
https://github.com/TRUMANCFY/VL-DIORA/tree/cef398e05842d4a30345260d8e27d1c362671834
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, size):
        super().__init__()
        self.size = size
        self.mat = nn.Parameter(torch.FloatTensor(self.size, self.size))
        self.reset_parameters()

    def reset_parameters(self):
        params = [p for p in self.parameters() if p.requires_grad]
        for i, param in enumerate(params):
            param.data.normal_()

    def forward(self, vector1, vector2):
        bma = torch.matmul(vector1, self.mat).unsqueeze(1)
        ba = torch.matmul(bma, vector2.unsqueeze(2)).view(-1, 1)
        return ba


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
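The Bilinear record computes the form s_b = sum_ij v1[b,i] * M[i,j] * v2[b,j] per batch element. A small sketch (illustrative names only; checked against torch.einsum, written for plain [B, D] inputs rather than the record's 4-D test tensors):

import torch

B, D = 8, 4
mat = torch.randn(D, D)
v1 = torch.randn(B, D)
v2 = torch.randn(B, D)

# Same two-step matmul as the module's forward().
bma = torch.matmul(v1, mat).unsqueeze(1)             # [B, 1, D]
ba = torch.matmul(bma, v2.unsqueeze(2)).view(-1, 1)  # [B, 1]

# Reference: the bilinear form written out explicitly.
ref = torch.einsum('bi,ij,bj->b', v1, mat, v2).unsqueeze(1)
assert torch.allclose(ba, ref, atol=1e-05)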
N_TransE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/w2/cw2psuktwvgqiciur5rr6hsrv47ce76ixqoodmx4iymb6o43ia3n.py # Topologically Sorted Source Nodes: [add, sub, norm, pred], Original ATen: [aten.add, aten.sub, aten.linalg_vector_norm, aten.neg] # Source node to ATen node mapping: # add => add # norm => pow_1, pow_2, sum_1 # pred => neg # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg2_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.25), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_2,), kwargs = {}) triton_poi_fused_add_linalg_vector_norm_neg_sub_0 = async_compile.triton('triton_poi_fused_add_linalg_vector_norm_neg_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_linalg_vector_norm_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_linalg_vector_norm_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr2 + (x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp8 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr2 + (16 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp18 = tl.load(in_ptr2 + (32 + x0 + (64*x1)), xmask) tmp23 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp24 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp26 = tl.load(in_ptr2 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp6 = tmp5 * tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 + tmp16 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 + tmp24 tmp27 = tmp25 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp22 + tmp29 tmp31 = 0.25 tmp32 = libdevice.pow(tmp30, tmp31) tmp33 = -tmp32 tl.store(in_out_ptr0 + (x2), tmp33, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, sub, norm, pred], Original ATen: [aten.add, aten.sub, aten.linalg_vector_norm, aten.neg] stream0 = get_raw_stream(0) triton_poi_fused_add_linalg_vector_norm_neg_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F


class N_TransE(torch.nn.Module):

    def __init__(self, p, params):
        super(N_TransE, self).__init__()
        self.p = p
        self.params = params

    def forward(self, e1, r, e2):
        pred = -torch.norm(e1 + r - e2, p=self.p, dim=1)
        return pred

    def loss(self, pos_score, neg_score, target):
        return F.relu(pos_score + self.params[0] - neg_score).sum(
            ) + self.params[1] * F.relu(pos_score - self.params[2]).sum()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'p': 4, 'params': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_linalg_vector_norm_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr2 + (x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp8 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr2 + (16 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp18 = tl.load(in_ptr2 + (32 + x0 + 64 * x1), xmask) tmp23 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp26 = tl.load(in_ptr2 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tmp4 * tmp4 tmp6 = tmp5 * tmp5 tmp9 = tmp7 + tmp8 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp6 + tmp13 tmp17 = tmp15 + tmp16 tmp19 = tmp17 - tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp14 + tmp21 tmp25 = tmp23 + tmp24 tmp27 = tmp25 - tmp26 tmp28 = tmp27 * tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp22 + tmp29 tmp31 = 0.25 tmp32 = libdevice.pow(tmp30, tmp31) tmp33 = -tmp32 tl.store(in_out_ptr0 + x2, tmp33, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_linalg_vector_norm_neg_sub_0[grid(64)](buf1, arg0_1, arg1_1, arg2_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class N_TransENew(torch.nn.Module): def __init__(self, p, params): super(N_TransENew, self).__init__() self.p = p self.params = params def loss(self, pos_score, neg_score, target): return F.relu(pos_score + self.params[0] - neg_score).sum( ) + self.params[1] * F.relu(pos_score - self.params[2]).sum() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
TMUITLab/EAFR
N_TransE
false
1,115
[ "MIT" ]
0
dadb6485d48711ccb8aa2f03760aeb437645f1ff
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
import torch
import torch.nn.functional as F


class Model(torch.nn.Module):

    def __init__(self, p, params):
        super().__init__()
        self.p = p
        self.params = params

    def forward(self, e1, r, e2):
        pred = -torch.norm(e1 + r - e2, p=self.p, dim=1)
        return pred

    def loss(self, pos_score, neg_score, target):
        return F.relu(pos_score + self.params[0] - neg_score).sum(
            ) + self.params[1] * F.relu(pos_score - self.params[2]).sum()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
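With p=4, the fused kernel above inlines the vector norm as (sum_i x_i^4)^(1/4), squaring twice instead of calling pow per element and finishing with libdevice.pow(·, 0.25). A short check (illustrative sketch, stock PyTorch only) that this matches torch.norm:

import torch

e1, r, e2 = (torch.rand(4, 4, 4, 4) for _ in range(3))
x = e1 + r - e2

# The kernel's formulation: square twice, sum over dim 1, fourth root.
manual = ((x * x) * (x * x)).sum(dim=1) ** 0.25
ref = torch.norm(x, p=4, dim=1)
assert torch.allclose(manual, ref, atol=1e-05)
pred = -ref  # the score N_TransE.forward returns for p=4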
FM
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/5v/c5vqsotjiyyydjenxs3ttprtusysjuigcmpcuavw4a5cbsh4movc.py # Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, cross_term, sum_3, cross_term_1], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub] # Source node to ATen node mapping: # cross_term => sub # cross_term_1 => mul_1 # mul => mul # square_of_sum => pow_1 # sum_1 => sum_1 # sum_3 => sum_3 # sum_of_square => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1], True), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [2]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 
16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + (x2), tmp68, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sum_1, square_of_sum, mul, sum_of_square, cross_term, sum_3, cross_term_1], Original ATen: [aten.sum, aten.pow, aten.mul, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing 
import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from sklearn.metrics import *


class FM(nn.Module):
    """Factorization Machine models pairwise (order-2) feature interactions
    without linear term and bias.

    Input shape
        - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
    References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self):
        super(FM, self).__init__()

    def forward(self, inputs):
        fm_input = inputs
        square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
        sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
        cross_term = square_of_sum - sum_of_square
        cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
        return cross_term


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + x2, tmp68, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class FMNew(nn.Module): """Factorization Machine models pairwise (order-2) feature interactions without linear term and bias. Input shape - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``. Output shape - 2D tensor with shape: ``(batch_size, 1)``. 
References - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf) """ def __init__(self): super(FMNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Sunmyunghan/Final_Project
FM
false
1,117
[ "MIT" ]
0
28cde293dc6d07521b2e1c5613b20444aea91d21
https://github.com/Sunmyunghan/Final_Project/tree/28cde293dc6d07521b2e1c5613b20444aea91d21
import torch
import torch.nn as nn
from sklearn.metrics import *


class Model(nn.Module):
    """Factorization Machine models pairwise (order-2) feature interactions
    without linear term and bias.

    Input shape
        - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
    References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        fm_input = inputs
        square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
        sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
        cross_term = square_of_sum - sum_of_square
        cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
        return cross_term


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
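The FM module relies on the identity 0.5 * ((sum_i x_i)^2 - sum_i x_i^2) = sum_{i<j} x_i * x_j, which turns the O(n^2) pairwise interaction sum into two O(n) reductions; this is exactly what the fused mul_pow_sub_sum kernel computes. A sketch (illustrative shapes and names) verifying it against the explicit double loop:

import torch

B, n_fields, k = 2, 5, 3
x = torch.rand(B, n_fields, k)

# Fast path: two linear-time reductions over the field dimension.
square_of_sum = x.sum(dim=1, keepdim=True) ** 2
sum_of_square = (x * x).sum(dim=1, keepdim=True)
fast = 0.5 * (square_of_sum - sum_of_square).sum(dim=2)  # [B, 1]

# Slow path: explicit sum over all field pairs i < j.
slow = torch.zeros(B, 1)
for i in range(n_fields):
    for j in range(i + 1, n_fields):
        slow += (x[:, i] * x[:, j]).sum(dim=1, keepdim=True)

assert torch.allclose(fast, slow, atol=1e-05)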
VertexDirectEmbedder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/xq/cxqinuparlha25j4geyv6tolvpah7qdqdkpecjesyn3kblysszql.py # Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] # Source node to ATen node mapping: # clamp => clamp_min # norm => pow_1, pow_2, sum_1 # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, clamp, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import pickle

import torch
import torch.utils.data
from torch import nn

# NOTE: the original source omits the pickle import and assumes a
# `PathManager` file-I/O helper (e.g. detectron2's) is in scope for load().


def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]

    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
    """
    return embeddings / torch.clamp(embeddings.norm(p=None, dim=1,
        keepdim=True), min=epsilon)


class VertexDirectEmbedder(nn.Module):
    """
    Class responsible for embedding vertices. Vertex embeddings take
    the form of a tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings

        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super(VertexDirectEmbedder, self).__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """
        Reset embeddings to random values
        """
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    def forward(self) ->torch.Tensor:
        """
        Produce vertex embeddings, a tensor of shape [N, D] where:
            N = number of vertices
            D = number of dimensions in the embedding space

        Return:
           Full vertex embeddings, a tensor of shape [N, D]
        """
        return normalize_embeddings(self.embeddings)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file

        Args:
            fpath (str): file path to load data from
        """
        with PathManager.open(fpath, 'rb') as hFile:
            data = pickle.load(hFile)
        for name in ['embeddings']:
            if name in data:
                getattr(self, name).copy_(torch.tensor(data[name]).float())


def get_inputs():
    return []


def get_init_inputs():
    return [[], {'num_vertices': 4, 'embed_dim': 4}]
import pickle

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
# PathManager is used by VertexDirectEmbedderNew.load below but was not
# imported in the original snippet; detectron2's file_io module is assumed.
from detectron2.utils.file_io import PathManager

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    primals_1, = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(16)](primals_1,
            buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf0, primals_1


def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]

    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are
        all equal to 1.
    """
    return embeddings / torch.clamp(embeddings.norm(p=None, dim=1,
        keepdim=True), min=epsilon)


class VertexDirectEmbedderNew(nn.Module):
    """
    Class responsible for embedding vertices. Vertex embeddings take
    the form of a tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings

        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super(VertexDirectEmbedderNew, self).__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """
        Reset embeddings to random values
        """
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file

        Args:
            fpath (str): file path to load data from
        """
        with PathManager.open(fpath, 'rb') as hFile:
            data = pickle.load(hFile)
            for name in ['embeddings']:
                if name in data:
                    getattr(self, name).copy_(torch.tensor(data[name]).float())

    def forward(self):
        primals_1 = self.embeddings
        output = call([primals_1])
        return output[0]
TWJianNuo/detectron2
VertexDirectEmbedder
false
1118
[ "Apache-2.0" ]
0
091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0
https://github.com/TWJianNuo/detectron2/tree/091bc43e85b8f7cefdccebf8d85afb7cfff2a3f0
import pickle

import torch
import torch.utils.data
from torch import nn
# NOTE: the original snippet calls PathManager without importing it; in
# detectron2 it is assumed to come from detectron2.utils.file_io.
from detectron2.utils.file_io import PathManager


def normalize_embeddings(embeddings: 'torch.Tensor', epsilon: 'float'=1e-06
    ) ->torch.Tensor:
    """
    Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]

    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): minimum value for a vector norm
    Return:
        Normalized embeddings (tensor [N, D]), such that L2 vector norms are
        all equal to 1.
    """
    return embeddings / torch.clamp(embeddings.norm(p=None, dim=1,
        keepdim=True), min=epsilon)


class Model(nn.Module):
    """
    Class responsible for embedding vertices. Vertex embeddings take
    the form of a tensor of size [N, D], where
        N = number of vertices
        D = number of dimensions in the embedding space
    """

    def __init__(self, num_vertices: 'int', embed_dim: 'int'):
        """
        Initialize embedder, set random embeddings

        Args:
            num_vertices (int): number of vertices to embed
            embed_dim (int): number of dimensions in the embedding space
        """
        super().__init__()
        self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
        self.reset_parameters()

    @torch.no_grad()
    def reset_parameters(self):
        """
        Reset embeddings to random values
        """
        torch.nn.init.uniform_(self.embeddings, a=-0.5, b=0.5)

    def forward(self) ->torch.Tensor:
        """
        Produce vertex embeddings, a tensor of shape [N, D] where:
            N = number of vertices
            D = number of dimensions in the embedding space

        Return:
            Full vertex embeddings, a tensor of shape [N, D]
        """
        return normalize_embeddings(self.embeddings)

    @torch.no_grad()
    def load(self, fpath: 'str'):
        """
        Load data from a file

        Args:
            fpath (str): file path to load data from
        """
        with PathManager.open(fpath, 'rb') as hFile:
            data = pickle.load(hFile)
            for name in ['embeddings']:
                if name in data:
                    getattr(self, name).copy_(torch.tensor(data[name]).float())


def get_inputs():
    return []


def get_init_inputs():
    return [4, 4]
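Side note (not part of the dataset record): for the default p=None (the 2-norm along dim=1), normalize_embeddings is numerically equivalent to torch.nn.functional.normalize with the same eps, since F.normalize also divides by the norm clamped from below. A quick CPU-only check:

import torch
import torch.nn.functional as F

emb = torch.rand(8, 5) - 0.5
ours = emb / torch.clamp(emb.norm(p=None, dim=1, keepdim=True), min=1e-06)
assert torch.allclose(ours, F.normalize(emb, p=2, dim=1, eps=1e-06))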
HighWay
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/mq/cmqzn4tuak4dmmt5haqtljb4thsuzy6lq5otm3atysewj5viuiif.py # Topologically Sorted Source Nodes: [t_1, gate, mul, sub, mul_1, add_1], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.rsub] # Source node to ATen node mapping: # add_1 => add_1 # gate => sigmoid # mul => mul # mul_1 => mul_1 # sub => sub # t_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %primals_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp8 = tl.load(in_ptr3 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp3 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [t_1, gate, mul, sub, mul_1, add_1], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.rsub] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_0.run(buf0, primals_3, primals_4, primals_2, buf1, 16, grid=grid(16), stream=stream0) return (buf1, primals_2, primals_3, primals_4, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import Parameter


class HighWay(torch.nn.Module):

    def __init__(self, f_in, f_out, bias=True):
        super(HighWay, self).__init__()
        self.w = Parameter(torch.Tensor(f_in, f_out))
        nn.init.xavier_uniform_(self.w)
        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            nn.init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

    def forward(self, in_1, in_2):
        t = torch.mm(in_1, self.w)
        if self.bias is not None:
            t = t + self.bias
        gate = torch.sigmoid(t)
        return gate * in_2 + (1.0 - gate) * in_1


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'f_in': 4, 'f_out': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x2, xmask) tmp8 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp5 = tmp3 * tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp3 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_0[grid(16)](buf0, primals_3, primals_4, primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, primals_2, primals_3, primals_4, buf0 class HighWayNew(torch.nn.Module): def __init__(self, f_in, f_out, bias=True): super(HighWayNew, self).__init__() self.w = Parameter(torch.Tensor(f_in, f_out)) nn.init.xavier_uniform_(self.w) if bias: self.bias = Parameter(torch.Tensor(f_out)) nn.init.constant_(self.bias, 0) else: self.register_parameter('bias', None) def forward(self, input_0, input_1): primals_1 = self.w primals_3 = self.bias primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
TMUITLab/EAFR
HighWay
false
1119
[ "MIT" ]
0
dadb6485d48711ccb8aa2f03760aeb437645f1ff
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
import torch
import torch.nn as nn
from torch.nn import Parameter


class Model(torch.nn.Module):

    def __init__(self, f_in, f_out, bias=True):
        super().__init__()
        self.w = Parameter(torch.Tensor(f_in, f_out))
        nn.init.xavier_uniform_(self.w)
        if bias:
            self.bias = Parameter(torch.Tensor(f_out))
            nn.init.constant_(self.bias, 0)
        else:
            self.register_parameter('bias', None)

    def forward(self, in_1, in_2):
        t = torch.mm(in_1, self.w)
        if self.bias is not None:
            t = t + self.bias
        gate = torch.sigmoid(t)
        return gate * in_2 + (1.0 - gate) * in_1


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4]
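Side note (not part of the dataset record): the fused kernel computes gate * in_2 + (1 - gate) * in_1, which is a linear interpolation between the two inputs with the sigmoid gate as the weight, so an eager reference can also be written with torch.lerp. Sketch (highway_reference is a name introduced here):

import torch

def highway_reference(in_1, in_2, w, bias):
    # Sigmoid gate, then lerp: (1 - gate) * in_1 + gate * in_2.
    gate = torch.sigmoid(in_1 @ w + bias)
    return torch.lerp(in_1, in_2, gate)

in_1, in_2 = torch.rand(4, 4), torch.rand(4, 4)
w, b = torch.rand(4, 4), torch.zeros(4)
gate = torch.sigmoid(in_1 @ w + b)
assert torch.allclose(highway_reference(in_1, in_2, w, b),
                      gate * in_2 + (1.0 - gate) * in_1)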
Network
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py # Topologically Sorted Source Nodes: [t], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # t => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/j4/cj4miacghwuwo6tmp3hylr7yjqyun32g4pisr65oc2dtlcxfwv2f.py # Topologically Sorted Source Nodes: [t_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # t_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/uy/cuylqrd7ye33ogvvpsnxb3skali4boxth4tryw5hn4czjzyh4a34.py # Topologically Sorted Source Nodes: [t_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # t_2 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [t], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 4096, 
grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [t_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [t_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [t_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.functional import softmax


class Network(nn.Module):

    def __init__(self, input_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc1 = nn.Linear(in_features=input_size, out_features=64)
        self.fc2 = nn.Linear(in_features=64, out_features=64)
        self.out = nn.Linear(in_features=64, out_features=output_size)

    def forward(self, t):
        t = relu(self.fc1(t))
        t = relu(self.fc2(t))
        t = softmax(self.out(t), dim=0)
        return t


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (192 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf8, 4096, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3, primals_5, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 64), (64, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class NetworkNew(nn.Module): def __init__(self, input_size, output_size): super().__init__() self.input_size = input_size self.output_size = output_size self.fc1 = nn.Linear(in_features=input_size, out_features=64) self.fc2 = nn.Linear(in_features=64, out_features=64) self.out = nn.Linear(in_features=64, out_features=output_size) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
THE-RAF/Reinforcement-Learning
Network
false
1120
[ "MIT" ]
0
36b4c5330740b533fb8170263f995afb91a1d021
https://github.com/THE-RAF/Reinforcement-Learning/tree/36b4c5330740b533fb8170263f995afb91a1d021
import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.functional import softmax


class Model(nn.Module):

    def __init__(self, input_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.fc1 = nn.Linear(in_features=input_size, out_features=64)
        self.fc2 = nn.Linear(in_features=64, out_features=64)
        self.out = nn.Linear(in_features=64, out_features=output_size)

    def forward(self, t):
        t = relu(self.fc1(t))
        t = relu(self.fc2(t))
        t = softmax(self.out(t), dim=0)
        return t


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
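Side note (not part of the dataset record): softmax(..., dim=0) normalizes across the leading batch dimension, which is why the two generated kernels load four values at offsets 0, 64, 128, and 192 (one per batch element) for each of the 64 inner positions. They implement the usual two-pass numerically stable scheme; an eager sketch:

import torch

def softmax_dim0(x: torch.Tensor) -> torch.Tensor:
    # Pass 1 (mirrors triton_poi_fused__softmax_1): subtract max, exponentiate.
    e = (x - x.amax(dim=0, keepdim=True)).exp()
    # Pass 2 (mirrors triton_poi_fused__softmax_2): divide by the sum.
    return e / e.sum(dim=0, keepdim=True)

x = torch.rand(4, 4, 4, 4)
assert torch.allclose(softmax_dim0(x), torch.softmax(x, dim=0))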
SpatialCrossMapLRN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/t4/ct42qpaygn7av2p6rystjl4hk3ybzwp5jyvmk3jaiukfiri3pq65.py # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] # Source node to ATen node mapping: # add => add # div_2 => pow_2 # mul => mul # x => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_add_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data


class SpatialCrossMapLRN(nn.Module):

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
        ACROSS_CHANNELS=True):
        super(SpatialCrossMapLRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        if ACROSS_CHANNELS:
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
                padding=int((local_size - 1.0) / 2))
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, x):
        if self.ACROSS_CHANNELS:
            div = x.pow(2).unsqueeze(1)
            div = self.average(div).squeeze(1)
            div = div.mul(self.alpha).add(self.k).pow(self.beta)
        else:
            div = x.pow(2)
            div = self.average(div)
            div = div.mul(self.alpha).add(self.k).pow(self.beta)
        x = x.div(div)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SpatialCrossMapLRNNew(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True): super(SpatialCrossMapLRNNew, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta self.k = k def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tagussan/pretrained-models.pytorch
SpatialCrossMapLRN
false
1121
[ "BSD-3-Clause" ]
0
854e6c153c2534dd7cf76a5ec102307ea5171167
https://github.com/Tagussan/pretrained-models.pytorch/tree/854e6c153c2534dd7cf76a5ec102307ea5171167
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data


class Model(nn.Module):

    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1,
        ACROSS_CHANNELS=True):
        super().__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        if ACROSS_CHANNELS:
            self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
                stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
                padding=int((local_size - 1.0) / 2))
        self.alpha = alpha
        self.beta = beta
        self.k = k

    def forward(self, x):
        if self.ACROSS_CHANNELS:
            div = x.pow(2).unsqueeze(1)
            div = self.average(div).squeeze(1)
            div = div.mul(self.alpha).add(self.k).pow(self.beta)
        else:
            div = x.pow(2)
            div = self.average(div)
            div = div.mul(self.alpha).add(self.k).pow(self.beta)
        x = x.div(div)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
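Side note (not part of the dataset record): with the defaults used here (local_size=1, alpha=1.0, beta=0.75, k=1) the 1x1x1 average pool is an identity, so the whole module collapses to the closed form x / (1 + x^2)^0.75, which is exactly what the single fused kernel computes (tmp1..tmp8). Sketch:

import torch

x = torch.rand(4, 4, 4, 4)
# AvgPool3d over a (1, 1, 1) window leaves its input unchanged, so the LRN
# denominator reduces to (x^2 * alpha + k) ** beta with alpha=1, k=1.
closed_form = x / (1.0 + x.pow(2)).pow(0.75)
module_style = x.div(x.pow(2).mul(1.0).add(1).pow(0.75))
assert torch.allclose(closed_form, module_style)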
MLPBase
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = 
xindex % 400 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # x_1 => relu_1 # x_2 => view_4 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %view_4 : [num_users=2] = 
call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = (xindex // 300) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300, ), (1, )) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 25600, grid=grid(25600), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf6 = 
empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf6, 19200, grid=grid(19200), stream=stream0) del primals_5 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_7 return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class MLPBase(nn.Module): def __init__(self, num_inputs, num_outputs): super(MLPBase, self).__init__() self.l1 = nn.Linear(num_inputs, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, num_outputs) def forward(self, inputs): x = F.relu(self.l1(inputs)) x = F.relu(self.l2(x)) x = self.l3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_inputs': 4, 'num_outputs': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300,), (1,)) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1, primals_2, buf7, 25600, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2, primals_5, buf3, buf6, 19200, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200, XBLOCK =256, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf5) del primals_7 return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 400), (400, 1), 0 ), buf4, primals_6, buf6, primals_4, buf7 class MLPBaseNew(nn.Module): def __init__(self, num_inputs, num_outputs): super(MLPBaseNew, self).__init__() self.l1 = nn.Linear(num_inputs, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, num_outputs) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
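Both fused kernels in this record follow the same pattern Inductor emits for ReLU under autograd: add the bias, apply max(0, ·) in place on the matmul output, and additionally store a boolean mask (buf6/buf7) that the aten.threshold_backward node reads during the backward pass. A minimal plain-PyTorch sketch of the per-element logic (illustrative only, not part of the record):

import torch

def bias_relu_with_mask(x, bias):
    # x: matmul output; the kernel overwrites it in place with relu(x + bias)
    y = torch.relu(x + bias)
    # threshold_backward zeroes the incoming gradient wherever mask is True
    mask = y <= 0
    return y, mask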
TachikakaMin/dreamer-torch
MLPBase
false
1122
[ "MIT" ]
0
3c99526f4507e28cf8b34ada0321001adcf8ae1f
https://github.com/TachikakaMin/dreamer-torch/tree/3c99526f4507e28cf8b34ada0321001adcf8ae1f
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, num_inputs, num_outputs): super().__init__() self.l1 = nn.Linear(num_inputs, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, num_outputs) def forward(self, inputs): x = F.relu(self.l1(inputs)) x = F.relu(self.l2(x)) x = self.l3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
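A minimal smoke test for this record, assuming MLPBase and MLPBaseNew above are importable and a CUDA device is available (the harness below is illustrative, not part of the dataset): copy the eager module's weights into the compiled one and check the outputs agree.

import torch

eager = MLPBase(4, 4).cuda()
compiled = MLPBaseNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # same l1/l2/l3 parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)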
N_R_Align
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/dl/cdl26m2hy376ftir63aeqndptbftlfcqva4itwmmgwwbyrs6eth2.py # Topologically Sorted Source Nodes: [cosine_similarity], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] # Source node to ATen node mapping: # cosine_similarity => clamp_min, clamp_min_1, div, div_1, mul, pow_1, pow_2, pow_3, pow_4, sum_1, sum_2 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, %clamp_min), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %clamp_min_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div), kwargs = {}) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x3), xmask) tmp17 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + (x3), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/eu/ceuwdky2xdaefd6i42mj44mw54md2rx7qikfobocu3ikagjrvotq.py # Topologically Sorted Source Nodes: [cosine_similarity, sigmoid, mul, cosine_similarity_1, sigmoid_1, mul_1, add], Original ATen: [aten.sum, aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # cosine_similarity => sum_3 # cosine_similarity_1 => sum_6 # mul => mul_1 # mul_1 => mul_3 # sigmoid => sigmoid # sigmoid_1 => sigmoid_1 # Graph fragment: # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sum_3,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 4), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # 
%sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sum_6,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, -3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {}) triton_poi_fused_add_mul_sigmoid_sum_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp8 = 4.0 tmp9 = tmp7 * tmp8 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = -3.0 tmp19 = tmp17 * tmp18 tmp20 = tmp9 + tmp19 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine_similarity], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] stream0 = get_raw_stream(0) 
triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine_similarity_1], Original ATen: [aten.linalg_vector_norm, aten.clamp_min, aten.div, aten.mul] triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0.run(arg3_1, arg2_1, buf1, 256, grid=grid(256), stream=stream0) del arg2_1 del arg3_1 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine_similarity, sigmoid, mul, cosine_similarity_1, sigmoid_1, mul_1, add], Original ATen: [aten.sum, aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_sum_1.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0) del buf0 del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class N_R_Align(torch.nn.Module): def __init__(self, params): super(N_R_Align, self).__init__() self.params = params self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06) def forward(self, e1, e2, n1, n2): return self.params * torch.sigmoid(self.cos_sim(n1, n2)) + (1 - self.params) * torch.sigmoid(self.cos_sim(e1, e2)) def loss(self, pos_score, neg_score, target): return -torch.log(pos_score).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'params': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x3, xmask) tmp17 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp18 = tmp17 * tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = triton_helpers.maximum(tmp28, tmp13) tmp30 = tmp16 / tmp29 tmp31 = tmp15 * tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp10 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp8 = 4.0 tmp9 = tmp7 * tmp8 tmp12 = tmp10 + tmp11 tmp14 = tmp12 + tmp13 tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp18 = -3.0 tmp19 = tmp17 * tmp18 tmp20 = tmp9 + tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)]( arg1_1, arg0_1, 
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(256)]( arg3_1, arg2_1, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg2_1 del arg3_1 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_sum_1[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf1 return buf2, class N_R_AlignNew(torch.nn.Module): def __init__(self, params): super(N_R_AlignNew, self).__init__() self.params = params self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06) def loss(self, pos_score, neg_score, target): return -torch.log(pos_score).sum() def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
TMUITLab/EAFR
N_R_Align
false
1123
[ "MIT" ]
0
dadb6485d48711ccb8aa2f03760aeb437645f1ff
https://github.com/TMUITLab/EAFR/tree/dadb6485d48711ccb8aa2f03760aeb437645f1ff
import torch import torch.nn as nn class Model(torch.nn.Module): def __init__(self, params): super().__init__() self.params = params self.cos_sim = nn.CosineSimilarity(dim=1, eps=1e-06) def forward(self, e1, e2, n1, n2): return self.params * torch.sigmoid(self.cos_sim(n1, n2)) + (1 - self.params) * torch.sigmoid(self.cos_sim(e1, e2)) def loss(self, pos_score, neg_score, target): return -torch.log(pos_score).sum() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
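With params = 4, Inductor constant-folds the Python scalars into the second kernel: the 4.0 and -3.0 literals are params and 1 - params. A plain-PyTorch sketch of what triton_poi_fused_add_mul_sigmoid_sum_1 computes, assuming prod_n and prod_e are the elementwise products of the L2-normalized inputs produced by the first kernel:

import torch

def fused_tail(prod_n, prod_e, params=4):
    cos_n = prod_n.sum(dim=1)  # finishes the dim-1 cosine similarity
    cos_e = prod_e.sum(dim=1)
    return params * torch.sigmoid(cos_n) + (1 - params) * torch.sigmoid(cos_e)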
FC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/zk/czkecze4wfcslvwkeplzfjv4ih2ndbfnpgq55lpnflnsjgnc72g4.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] # Source node to ATen node mapping: # out_1 => gt, mul, view_3, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_8, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = tmp5 > tmp1 tl.store(out_ptr0 + (x0), tmp5, xmask) tl.store(out_ptr1 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_view_0.run(buf1, buf2, buf3, 256, grid=grid(256), stream=stream0) del buf1 return (buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class FC(nn.Module):

    def __init__(self, in_channels, out_channels, use_bias=False,
        activation='LR', gain=2 ** 0.5):
        super(FC, self).__init__()
        self.he_std = in_channels ** -0.5 * gain  # He init: std = gain / sqrt(fan_in)
        self.weight = torch.nn.Parameter(torch.randn(out_channels,
            in_channels) * self.he_std)
        if use_bias:
            self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        if activation == 'LR':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'Sigmoid':
            self.activation = nn.Sigmoid()
        elif activation is None:
            self.activation = None
        else:
            assert 0, "STGAN's FC requires LR or Sigmoid, not {}".format(
                activation)

    def forward(self, x):
        if self.bias is not None:
            out = F.linear(x, self.weight, self.bias)
        else:
            out = F.linear(x, self.weight)
        if self.activation:
            out = self.activation(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_view_0(in_out_ptr0,
    out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tmp6 = tmp5 > tmp1
    tl.store(out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr1 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_leaky_relu_backward_view_0[grid(256)](buf1,
            buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
    return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf3


class FCNew(nn.Module):

    def __init__(self, in_channels, out_channels, use_bias=False,
        activation='LR', gain=2 ** 0.5):
        super(FCNew, self).__init__()
        self.he_std = in_channels ** -0.5 * gain  # He init: std = gain / sqrt(fan_in)
        self.weight = torch.nn.Parameter(torch.randn(out_channels,
            in_channels) * self.he_std)
        if use_bias:
            self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        if activation == 'LR':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'Sigmoid':
            self.activation = nn.Sigmoid()
        elif activation is None:
            self.activation = None
        else:
            assert 0, "STGAN's FC requires LR or Sigmoid, not {}".format(
                activation)

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
TOMeoww/STGAN
FC
false
1124
[ "MIT" ]
0
090a4024999e68f017140312ecfdd0d4dc3dc425
https://github.com/TOMeoww/STGAN/tree/090a4024999e68f017140312ecfdd0d4dc3dc425
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, in_channels, out_channels, use_bias=False,
        activation='LR', gain=2 ** 0.5):
        super().__init__()
        self.he_std = in_channels ** -0.5 * gain  # He init: std = gain / sqrt(fan_in)
        self.weight = torch.nn.Parameter(torch.randn(out_channels,
            in_channels) * self.he_std)
        if use_bias:
            self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        if activation == 'LR':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'Sigmoid':
            self.activation = nn.Sigmoid()
        elif activation is None:
            self.activation = None
        else:
            assert 0, "STGAN's FC requires LR or Sigmoid, not {}".format(
                activation)

    def forward(self, x):
        if self.bias is not None:
            out = F.linear(x, self.weight, self.bias)
        else:
            out = F.linear(x, self.weight)
        if self.activation:
            out = self.activation(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
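Besides the activation itself, the single fused kernel in this record also emits the boolean mask consumed by LeakyReLU's backward. A per-element sketch in plain PyTorch (the 0.2 slope comes from nn.LeakyReLU(0.2) in the module; illustrative only):

import torch

def leaky_relu_with_mask(x, negative_slope=0.2):
    out = torch.where(x > 0, x, negative_slope * x)  # forward activation
    mask = out > 0  # backward: gradient passes at slope 1 where True, 0.2 elsewhere
    return out, mask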
Mean
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/7g/c7gvt7fcrwihu3eypgqeru4bybzepmpynh45oes44jy2ps6isyr7.py # Topologically Sorted Source Nodes: [std_mean], Original ATen: [aten.std_mean] # Source node to ATen node mapping: # std_mean => var_mean # Graph fragment: # %var_mean : [num_users=1] = call_function[target=torch.ops.aten.var_mean.correction](args = (%arg0_1, [4]), kwargs = {correction: 1.0}) triton_poi_fused_std_mean_0 = async_compile.triton('triton_poi_fused_std_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_std_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_std_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), 
xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) # Topologically Sorted Source Nodes: [std_mean], Original ATen: [aten.std_mean] stream0 = get_raw_stream(0) triton_poi_fused_std_mean_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class Mean(torch.nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): _std, mean = torch.std_mean(x, self.dim) return mean def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_std_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32) get_raw_stream(0) triton_poi_fused_std_mean_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), class MeanNew(torch.nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tahlor/glom-pytorch
Mean
false
1125
[ "MIT" ]
0
45b2fc52af5288cd53611e497a70d53ffa303410
https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410
import torch class Model(torch.nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): _std, mean = torch.std_mean(x, self.dim) return mean def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [4]
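The eager module calls torch.std_mean but only returns the mean, so the std half of aten.var_mean is dead code and Inductor never materializes it; the generated kernel is just an unrolled four-element mean over the last axis. An eager equivalent for comparison (illustrative, not part of the record):

import torch

x = torch.rand(4, 4, 4, 4, 4, device='cuda')
mean = x.mean(dim=4)  # what triton_poi_fused_std_mean_0 actually computes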
LinearModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.clone] # Source node to ATen node mapping: # hidden => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, 
primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class LinearModel(torch.nn.Module): def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float' ): super().__init__() self.linear = torch.nn.Linear(input_size, output_size) self.dropout = torch.nn.Dropout(dropout) def forward(self, data): data = data[:, 0, :] hidden = self.dropout(data) output = self.linear(hidden) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0) class LinearModelNew(torch.nn.Module): def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float' ): super().__init__() self.linear = torch.nn.Linear(input_size, output_size) self.dropout = torch.nn.Dropout(dropout) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TDteach/SEAM
LinearModel
false
1126
[ "MIT" ]
0
231447dad15403e7620adcf6629b6e7fccc4b809
https://github.com/TDteach/SEAM/tree/231447dad15403e7620adcf6629b6e7fccc4b809
import torch class Model(torch.nn.Module): def __init__(self, input_size: 'int', output_size: 'int', dropout: 'float' ): super().__init__() self.linear = torch.nn.Linear(input_size, output_size) self.dropout = torch.nn.Dropout(dropout) def forward(self, data): data = data[:, 0, :] hidden = self.dropout(data) output = self.linear(hidden) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 0.5]
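The generated call() contains no dropout: the module was evidently traced in inference mode, where nn.Dropout is the identity, so only the data[:, 0, :] slice (materialized by the clone kernel) and the addmm survive. A sketch of the equivalent eager computation under that assumption:

import torch
import torch.nn.functional as F

def linear_model_inference(data, weight, bias):
    hidden = data[:, 0, :].contiguous()  # the triton_poi_fused_clone_0 step
    return F.linear(hidden, weight, bias)  # the fused addmm step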
GeometricMean
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/53/c536dq7bncdpsnuyp7jtn2kgbqzlbq4i6q7p4bjliwwyfzwkz7sq.py # Topologically Sorted Source Nodes: [relu, log_x, mean, exp], Original ATen: [aten.relu, aten.log, aten.mean, aten.exp] # Source node to ATen node mapping: # exp => exp # log_x => log # mean => mean # relu => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%relu,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%log, [4]), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mean,), kwargs = {}) triton_poi_fused_exp_log_mean_relu_0 = async_compile.triton('triton_poi_fused_exp_log_mean_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_log_mean_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_exp_log_mean_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl_math.log(tmp2) tmp5 = triton_helpers.maximum(tmp1, tmp4) tmp6 = tl_math.log(tmp5) tmp7 = tmp3 + tmp6 tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = tl_math.log(tmp9) tmp11 = tmp7 + tmp10 tmp13 = triton_helpers.maximum(tmp1, tmp12) tmp14 = tl_math.log(tmp13) tmp15 = tmp11 + tmp14 tmp16 = 4.0 tmp17 = tmp15 / tmp16 tmp18 = tl_math.exp(tmp17) tl.store(out_ptr0 + (x0), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu, log_x, mean, exp], Original ATen: [aten.relu, aten.log, aten.mean, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_log_mean_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F class GeometricMean(torch.nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): log_x = torch.log(F.relu(x)) return torch.exp(torch.mean(log_x, dim=self.dim)) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_log_mean_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl_math.log(tmp2) tmp5 = triton_helpers.maximum(tmp1, tmp4) tmp6 = tl_math.log(tmp5) tmp7 = tmp3 + tmp6 tmp9 = triton_helpers.maximum(tmp1, tmp8) tmp10 = tl_math.log(tmp9) tmp11 = tmp7 + tmp10 tmp13 = triton_helpers.maximum(tmp1, tmp12) tmp14 = tl_math.log(tmp13) tmp15 = tmp11 + tmp14 tmp16 = 4.0 tmp17 = tmp15 / tmp16 tmp18 = tl_math.exp(tmp17) tl.store(out_ptr0 + x0, tmp18, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_log_mean_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GeometricMeanNew(torch.nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tahlor/glom-pytorch
GeometricMean
false
1127
[ "MIT" ]
0
45b2fc52af5288cd53611e497a70d53ffa303410
https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410
import torch
import torch.nn.functional as F


class Model(torch.nn.Module):

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        log_x = torch.log(F.relu(x))
        return torch.exp(torch.mean(log_x, dim=self.dim))


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [4]
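Since this row compiles exp(mean(log(relu(x)))), it may help to note that this is the geometric mean along `dim`: for positive x, exp(mean(log x)) = (prod x) ** (1/n). A small self-contained check (variable names are mine; the + 0.1 offset keeps entries strictly positive, since F.relu followed by log would map any non-positive entry to -inf):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, 4) + 0.1   # strictly positive, so log() stays finite
dim, n = 4, 4

gm_log = torch.exp(torch.mean(torch.log(F.relu(x)), dim=dim))  # the module's form
gm_prod = torch.prod(x, dim=dim) ** (1.0 / n)                  # product form

assert torch.allclose(gm_log, gm_prod, atol=1e-5)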
MinibatchStd
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/fe/cfe7ai6nrnajldrnau4a6rtqqrk2wdm7opgj7c5duuavburs6m7o.py # Topologically Sorted Source Nodes: [std, mean, repeat], Original ATen: [aten.std, aten.mean, aten.repeat] # Source node to ATen node mapping: # mean => mean # repeat => repeat # std => sqrt, var # Graph fragment: # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [0]), kwargs = {correction: 1.0}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%mean, [4, 1, 4, 4]), kwargs = {}) triton_per_fused_mean_repeat_std_0 = async_compile.triton('triton_per_fused_mean_repeat_std_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_repeat_std_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_repeat_std_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / tmp26 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp27, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/yi/cyidf2yj3fms5jdxlfe7fdijzfj6p5a5q2qxo4llkuxnpqh6fj5o.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [std, mean, repeat], Original ATen: [aten.std, aten.mean, aten.repeat] stream0 = get_raw_stream(0) triton_per_fused_mean_repeat_std_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MinibatchStd(nn.Module):
    """
    calculate minibatch std to avoid mode collapse
    """

    def __init__(self):
        super(MinibatchStd, self).__init__()

    def forward(self, x):
        size = list(x.size())
        size[1] = 1
        std = torch.std(x, dim=0)
        mean = torch.mean(std)
        return torch.cat((x, mean.repeat(size)), dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_repeat_std_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = 3.0 tmp21 = tmp19 / tmp20 tmp22 = libdevice.sqrt(tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.sum(tmp23, 1)[:, None] tmp26 = 64.0 tmp27 = tmp25 / tmp26 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp27, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_mean_repeat_std_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf3, class MinibatchStdNew(nn.Module): """ calculate minibatch std to avoid mode collapse """ def __init__(self): super(MinibatchStdNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tak-jae-ho/RGBD-GAN-pytorch
MinibatchStd
false
1128
[ "MIT" ]
0
4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
https://github.com/Tak-jae-ho/RGBD-GAN-pytorch/tree/4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    calculate minibatch std to avoid mode collapse
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        size = list(x.size())
        size[1] = 1
        std = torch.std(x, dim=0)
        mean = torch.mean(std)
        return torch.cat((x, mean.repeat(size)), dim=1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
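The shape bookkeeping in this row is easy to miss: std is taken across the batch (dim=0, unbiased, hence the 3.0 = N - 1 divisor in the kernel), the resulting 4*4*4 = 64 per-element stds are averaged to one scalar (the 64.0 divisor), and that scalar is broadcast back as a single extra channel. That is why the compiled call builds a (4, 5, 4, 4) buffer with stride 80 (64 data elements plus 16 statistic elements per sample) and fills the two regions with separate kernels. A hedged eager-mode restatement, with names of my choosing:

import torch

def minibatch_std(x):
    size = list(x.size())
    size[1] = 1                      # the statistic occupies one channel
    std = torch.std(x, dim=0)        # unbiased std across the batch, per element
    mean = torch.mean(std)           # collapse to a single scalar
    return torch.cat((x, mean.repeat(size)), dim=1)

x = torch.rand(4, 4, 4, 4)
out = minibatch_std(x)
assert out.shape == (4, 5, 4, 4)
assert torch.allclose(out[:, 4], out[0, 4])  # the appended channel is constant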
PixelwiseNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ao/caol5mhvrwgvqsgq2tai5vbpmasyjuuavozutxehikyh6g7bgyvg.py # Topologically Sorted Source Nodes: [pow_1, sum_1, add, sqrt, truediv], Original ATen: [aten.pow, aten.sum, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add => add # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-07), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {}) triton_poi_fused_add_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 1e-07 tmp13 = tmp11 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, add, sqrt, truediv], Original ATen: [aten.pow, aten.sum, aten.add, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_pow_sqrt_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PixelwiseNorm(nn.Module):
    """
    layer pixelwise normalization
    """

    def __init__(self, eps=1e-07):
        super(PixelwiseNorm, self).__init__()
        self.eps = eps

    def forward(self, x):
        return x / torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 1e-07 tmp13 = tmp11 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sqrt_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PixelwiseNormNew(nn.Module): """ layer pixelwise normalization """ def __init__(self, eps=1e-07): super(PixelwiseNormNew, self).__init__() self.eps = eps def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tak-jae-ho/RGBD-GAN-pytorch
PixelwiseNorm
false
1129
[ "MIT" ]
0
4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
https://github.com/Tak-jae-ho/RGBD-GAN-pytorch/tree/4fb1bc1de7b7807fd4f2d346d9b688a2d257eedb
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    layer pixelwise normalization
    """

    def __init__(self, eps=1e-07):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        return x / torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
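One detail worth flagging in this row: the eps sits inside the square root, which is not quite the same as F.normalize's clamp of the norm, and the fused kernel matches the module by computing sqrt(sum(x ** 2) + 1e-07) over the four channel values before dividing. A small illustrative comparison (names are mine, not from the row):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
eps = 1e-07

out = x / torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + eps)
ref = F.normalize(x, p=2, dim=1)   # clamps the norm instead of adding eps

# The two agree closely for well-scaled inputs but diverge as x -> 0.
print(torch.max(torch.abs(out - ref)).item())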
ConsensusAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/qd/cqdnz7hcxxkadyci2slws6wn222hkkzcp6hb3x3b5ttuny2wks7b.py # Topologically Sorted Source Nodes: [einsum, out], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum => clone # out => clone_2 # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) tl.store(out_ptr1 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/w6/cw6bzt4yaqzfq7jhtvpqzm42cbssdg77yhymaouoko5jz36q3rhl.py # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] # Source node to ATen node mapping: # einsum => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y2 = (yindex // 16) y4 = yindex % 16 y1 = (yindex // 4) % 4 y5 = yindex tmp0 = tl.load(in_ptr0 + (y4 + (16*x3) + (64*y2)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((4*y1) + (16*x3) + (64*y2)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*y1) + (16*x3) + (64*y2)), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*y1) + (16*x3) + (64*y2)), xmask & ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*y1) + (16*x3) + (64*y2)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3 + (4*y5)), tmp15, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7n/c7nhob3voygbzg73wacfq2bbcquxe5ozjclkx7uda5hunwppje36.py # 
Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn => div_1, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = 
{}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum, out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, buf5, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(arg0_1, buf1, 64, 4, grid=grid(64, 4), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf0 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] 
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 del buf5 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 4, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
from torch import nn
from torch import einsum
from einops import rearrange  # used by both masking branches; missing from the extracted listing

# The extracted snippet references this constant without defining it; the value
# below is assumed from the upstream glom-pytorch source.
TOKEN_ATTEND_SELF_VALUE = -5e-4


class ConsensusAttention(nn.Module):

    def __init__(self, num_patches_side, attend_self=True,
                 local_consensus_radius=0):
        super().__init__()
        self.attend_self = attend_self
        self.local_consensus_radius = local_consensus_radius
        if self.local_consensus_radius > 0:
            coors = torch.stack(torch.meshgrid(torch.arange(
                num_patches_side), torch.arange(num_patches_side))).float()
            coors = rearrange(coors, 'c h w -> (h w) c')
            dist = torch.cdist(coors, coors)
            mask_non_local = dist > self.local_consensus_radius
            mask_non_local = rearrange(mask_non_local, 'i j -> () i j')
            self.register_buffer('non_local_mask', mask_non_local)

    def forward(self, levels):
        _, n, _, d, device = *levels.shape, levels.device
        q, k, _v = levels, F.normalize(levels, dim=-1), levels
        sim = einsum('b i l d, b j l d -> b l i j', q, k) * d ** -0.5
        if not self.attend_self:
            self_mask = torch.eye(n, device=device, dtype=torch.bool)
            self_mask = rearrange(self_mask, 'i j -> () () i j')
            sim.masked_fill_(self_mask, TOKEN_ATTEND_SELF_VALUE)
        if self.local_consensus_radius > 0:
            max_neg_value = -torch.finfo(sim.dtype).max
            sim.masked_fill_(self.non_local_mask, max_neg_value)
        attn = sim.softmax(dim=-1)
        out = einsum('b l i j, b j l d -> b i l d', attn, levels)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_patches_side': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) tl.store(out_ptr1 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex y2 = yindex // 16 y4 = yindex % 16 y1 = yindex // 4 % 4 y5 = yindex tmp0 = tl.load(in_ptr0 + (y4 + 16 * x3 + 64 * y2), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * y1 + 16 * x3 + 64 * y2), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * y1 + 16 * x3 + 64 * y2), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * y1 + 16 * x3 + 64 * y2), xmask & ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * y1 + 16 * x3 + 64 * y2), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3 + 4 * y5), tmp15, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, 
eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) buf5 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, buf5, 256, XBLOCK =256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) triton_poi_fused_clone_1[grid(64, 4)](arg0_1, buf1, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf0 buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_3[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6) del buf4 del buf5 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 4, 16, 1), 0), class ConsensusAttentionNew(nn.Module): def __init__(self, num_patches_side, attend_self=True, local_consensus_radius=0): super().__init__() self.attend_self = attend_self self.local_consensus_radius = local_consensus_radius if self.local_consensus_radius > 0: coors = torch.stack(torch.meshgrid(torch.arange( num_patches_side), torch.arange(num_patches_side))).float() coors = rearrange(coors, 'c h w -> (h w) c') dist = torch.cdist(coors, coors) mask_non_local = dist > self.local_consensus_radius mask_non_local = rearrange(mask_non_local, 'i j -> () i j') self.register_buffer('non_local_mask', mask_non_local) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tahlor/glom-pytorch
ConsensusAttention
false
1130
[ "MIT" ]
0
45b2fc52af5288cd53611e497a70d53ffa303410
https://github.com/Tahlor/glom-pytorch/tree/45b2fc52af5288cd53611e497a70d53ffa303410
import torch
import torch.nn.functional as F
from torch import nn
from torch import einsum
from einops import rearrange  # used by both masking branches; missing from the extracted listing

# The extracted snippet references this constant without defining it; the value
# below is assumed from the upstream glom-pytorch source.
TOKEN_ATTEND_SELF_VALUE = -5e-4


class Model(nn.Module):

    def __init__(self, num_patches_side, attend_self=True,
                 local_consensus_radius=0):
        super().__init__()
        self.attend_self = attend_self
        self.local_consensus_radius = local_consensus_radius
        if self.local_consensus_radius > 0:
            coors = torch.stack(torch.meshgrid(torch.arange(
                num_patches_side), torch.arange(num_patches_side))).float()
            coors = rearrange(coors, 'c h w -> (h w) c')
            dist = torch.cdist(coors, coors)
            mask_non_local = dist > self.local_consensus_radius
            mask_non_local = rearrange(mask_non_local, 'i j -> () i j')
            self.register_buffer('non_local_mask', mask_non_local)

    def forward(self, levels):
        _, n, _, d, device = *levels.shape, levels.device
        q, k, _v = levels, F.normalize(levels, dim=-1), levels
        sim = einsum('b i l d, b j l d -> b l i j', q, k) * d ** -0.5
        if not self.attend_self:
            self_mask = torch.eye(n, device=device, dtype=torch.bool)
            self_mask = rearrange(self_mask, 'i j -> () () i j')
            sim.masked_fill_(self_mask, TOKEN_ATTEND_SELF_VALUE)
        if self.local_consensus_radius > 0:
            max_neg_value = -torch.finfo(sim.dtype).max
            sim.masked_fill_(self.non_local_mask, max_neg_value)
        attn = sim.softmax(dim=-1)
        out = einsum('b l i j, b j l d -> b i l d', attn, levels)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
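For the default configuration exercised here (attend_self=True, local_consensus_radius=0), neither masking branch runs, so neither einops nor TOKEN_ATTEND_SELF_VALUE is actually touched; the same caveat applies to ConsensusAttentionNew above, whose __init__ would also need the rearrange import if a positive radius were passed. The core computation then reduces to the sketch below. Note that the 0.5 scale inside triton_poi_fused__softmax_2 is exactly d ** -0.5 for d = 4, baked in at compile time:

import torch
import torch.nn.functional as F
from torch import einsum

b, n, l, d = 4, 4, 4, 4                 # batch, patches, levels, dim
levels = torch.rand(b, n, l, d)

q, k = levels, F.normalize(levels, dim=-1)
sim = einsum('b i l d, b j l d -> b l i j', q, k) * d ** -0.5
attn = sim.softmax(dim=-1)
out = einsum('b l i j, b j l d -> b i l d', attn, levels)

assert out.shape == (b, n, l, d)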
DenseCrossEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logprobs => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nb/cnbdxy34iv6vkig4bfuqrxbegug3ek6lhyugevz3qctt7efdvtge.py # Topologically Sorted Source Nodes: [logprobs, neg, loss, loss_1, mean], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # logprobs => exp, log, sub_1, sum_1 # loss => mul # loss_1 => sum_2 # mean => mean # neg => neg # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: 
tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logprobs], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [logprobs, neg, loss, loss_1, mean], Original ATen: [aten._log_softmax, aten.neg, aten.mul, aten.sum, aten.mean] triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf3, buf0, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel


class DenseCrossEntropy(nn.Module):

    def forward(self, x, target):
        x = x.float()
        target = target.float()
        logprobs = torch.nn.functional.log_softmax(x, dim=-1)
        loss = -logprobs * target
        loss = loss.sum(-1)
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tl_math.exp(tmp0) tmp3 = tl_math.exp(tmp2) tmp4 = tmp1 + tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp4 + tmp6 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp0 - tmp11 tmp13 = -tmp12 tmp15 = tmp13 * tmp14 tmp16 = tmp2 - tmp11 tmp17 = -tmp16 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp21 = tmp5 - tmp11 tmp22 = -tmp21 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp8 - tmp11 tmp27 = -tmp26 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, buf0, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return 
buf3, class DenseCrossEntropyNew(nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution
DenseCrossEntropy
false
1131
[ "Apache-2.0" ]
0
8e2d9056d5c88c6415827086809e73522b336fbb
https://github.com/Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution/tree/8e2d9056d5c88c6415827086809e73522b336fbb
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel class Model(nn.Module): def forward(self, x, target): x = x.float() target = target.float() logprobs = torch.nn.functional.log_softmax(x, dim=-1) loss = -logprobs * target loss = loss.sum(-1) return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
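A minimal verification sketch, not part of the dataset entry: assuming a CUDA device and the DenseCrossEntropyNew class defined above, the compiled module should match the eager formula from the source Model (log_softmax followed by a weighted sum and mean). Tensor shapes follow get_inputs().

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')       # same shapes as get_inputs()
target = torch.rand(4, 4, 4, 4, device='cuda')

# Eager reference: loss = (-log_softmax(x) * target).sum(-1).mean()
eager = (-F.log_softmax(x, dim=-1) * target).sum(-1).mean()

# Compiled path: kernel 0 subtracts the row max, kernel 1 fuses the
# log-sum-exp, negation, weighting, sum and mean into one reduction.
compiled = DenseCrossEntropyNew()(x, target)
torch.testing.assert_close(compiled, eager, rtol=1e-4, atol=1e-4)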
HalfMSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/wj/cwj2vuvocpmuwf7kyymuynpm2f23fwqwm7fmbyjxaws24h4obhbj.py # Topologically Sorted Source Nodes: [mse_loss, truediv], Original ATen: [aten.mse_loss, aten.div] # Source node to ATen node mapping: # mse_loss => mean, pow_1, sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean, 2), kwargs = {}) triton_per_fused_div_mse_loss_0 = async_compile.triton('triton_per_fused_div_mse_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False} ) @triton.jit def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mse_loss, truediv], Original ATen: [aten.mse_loss, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_mse_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn.modules.loss import MSELoss class HalfMSELoss(MSELoss): def __init__(self, reduction='mean'): super().__init__(reduction=reduction) def forward(self, input, target): return super().forward(input, target) / 2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn.modules.loss import MSELoss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mse_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HalfMSELossNew(MSELoss): def __init__(self, reduction='mean'): super().__init__(reduction=reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ThayaFluss/candle
HalfMSELoss
false
1132
[ "MIT" ]
0
4a12fde60ffbbf0cb688617fee81aded94c0b613
https://github.com/ThayaFluss/candle/tree/4a12fde60ffbbf0cb688617fee81aded94c0b613
import torch from torch.nn.modules.loss import MSELoss class Model(MSELoss): def __init__(self, reduction='mean'): super().__init__(reduction=reduction) def forward(self, input, target): return super().forward(input, target) / 2 def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
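A usage sketch under the same assumptions (CUDA available, classes as defined above): the fused kernel reduces all 4*4*4*4 = 256 elements in one persistent reduction, computing mean((a - b)**2) * 0.5, so HalfMSELossNew should agree with the eager half-MSE.

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')

eager = F.mse_loss(pred, target) / 2            # reference from the Model above
compiled = HalfMSELossNew()(pred, target)       # single fused Triton kernel
torch.testing.assert_close(compiled, eager, rtol=1e-4, atol=1e-4)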
EqualLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/fl/cflw6zjzdk2wqtau7m6nsei5vavjfijzxhb37zaa3xp4yxpw5yb2.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically Sorted 
Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_2, buf1, 4, grid=grid(4), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, linear], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, lr_mul=1, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim)) self.lr_mul = lr_mul def forward(self, input): return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim, lr_mul=1, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim)) self.lr_mul = lr_mul def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TheSignPainter/AGGAN
EqualLinear
false
1133
[ "Apache-2.0" ]
0
d75144f81df3f5a0a761d48c6285c38e74002be3
https://github.com/TheSignPainter/AGGAN/tree/d75144f81df3f5a0a761d48c6285c38e74002be3
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_dim, out_dim, lr_mul=1, bias=True): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim)) self.lr_mul = lr_mul def forward(self, input): return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
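A hedged sketch for the entry above (assuming CUDA): the compiled path scales weight and bias by lr_mul with two pointwise kernels, then dispatches one extern addmm over the input flattened to (64, 4), so EqualLinearNew should match F.linear with pre-scaled parameters.

import torch
import torch.nn.functional as F

lin = EqualLinearNew(in_dim=4, out_dim=4).cuda()  # init args per get_init_inputs()
x = torch.rand(4, 4, 4, 4, device='cuda')

# Eager reference: F.linear with weight and bias pre-scaled by lr_mul (1 here)
eager = F.linear(x, lin.weight * lin.lr_mul, lin.bias * lin.lr_mul)
compiled = lin(x)                                 # two mul kernels + extern addmm
torch.testing.assert_close(compiled, eager, rtol=1e-4, atol=1e-4)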
SuperPointNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/pn/cpng7gl7lqxvqafyqlu5mbr4lc7m2sgi4l5ulbiv46djlkgyencv.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_6/inductor_cache/ne/cnepmjd66uu3laeexeusfxab3aayptiri2wp2knrgtgmx52tvzxj.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ba/cbayuw2by4w6xwduhs5qdriinmydiep6bpw7fyi37s377up7lrcm.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/yd/cydvmxsmzwizyj5fbgjnjeeo27as6zdlft5s5uj57ovvcxtlbfhh.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vn/cvnnodtrripz7gtommdv4wbjjfexefcdjq3t2xglmrxcj2g7mll4.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # 
Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (y0 + (64*x2) + (262144*y1)), tmp4, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7c/c7caq6stn5xhqphn2xnmwbpvxspyfj5wahntqw4tlpltw5xu6ktg.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor 
from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/p7/cp7hkfs7dzspvks5o4gggcw3s4o5jb3vqo372n6r4xcl5tx3xupa.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 32 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ya/cya72dxxug7bvahrgkiz2tev6wxbq4aissg3wd3pl37yen37nb4b.py # Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xj/cxjptd7j2qxrb3kjd7zlgxmewdvhhkbw3tukgvay2kmhnxcajkzw.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_5 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 16 x2 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (128*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + (128*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = 
tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/v5/cv5bres457ho44iaqr63mi3bbgzezc3pxml5sotwsljao2g5whrl.py # Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/sr/csr7z2afgh7gbn3y7lq2wp2sva4b7imt3iniu36uxe33zsilt4x7.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_8 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_10 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) % 8 x2 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (4096*x2)), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (4096*x2)), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + (256*x1) + (4096*x2)), None) tmp5 = tl.load(in_ptr0 + (2176 + x0 + (256*x1) + (4096*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/g6/cg6bxfpbjt7yt2cidmobe46sxen6spgw4gul66mxxotjhzxvzddf.py # Topologically Sorted Source Nodes: [conv2d_6, x_9], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # x_9 => relu_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math 
as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/l5/cl5g2w4v5osla5sy5d2al2y6dspf2ipfcnmfehdgyecqjlqqwxp5.py # Topologically Sorted Source Nodes: [conv2d_8, cPa], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # cPa => relu_8 # conv2d_8 => convolution_8 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/q3/cq35pbgnsnzjuhpsen4p6ua7wlqsoqqkc5hvjqqupede7xjns4pl.py # Topologically Sorted Source Nodes: [semi], Original ATen: [aten.convolution] # Source node to ATen node mapping: # semi => convolution_9 # Graph fragment: # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 64], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 260 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 65 y1 = (yindex // 65) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (65*x2) + (4160*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (64*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/le/clesx3fq4nvfilcvyfxgg66sqkyn3nl3mexlek76x5apn2ediyvi.py # Topologically Sorted Source Nodes: [desc, dn], Original ATen: [aten.convolution, aten.linalg_vector_norm] # Source node to ATen node mapping: # desc => convolution_11 # dn => pow_1, pow_2, sum_1 # Graph fragment: # %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_24, %primals_25, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution_11, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_per_fused_convolution_linalg_vector_norm_14 = async_compile.triton('triton_per_fused_convolution_linalg_vector_norm_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_linalg_vector_norm_14', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_linalg_vector_norm_14(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.load(in_ptr0 + (r1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = libdevice.sqrt(tmp6) tl.store(in_out_ptr0 + (r1 + (256*x0)), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/f4/cf4ceh57l4kazdvaqbryn4i5xohp6gmuuxrk5bvsv4ct3wlef3om.py # Topologically Sorted Source Nodes: [desc_1], Original ATen: [aten.div] # Source node to ATen node mapping: # desc_1 => div # Graph 
fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convolution_11, %unsqueeze), kwargs = {}) triton_poi_fused_div_15 = async_compile.triton('triton_poi_fused_div_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 64], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_15(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = (yindex // 256) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (16384*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (64*y1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + (x2 + (64*y3)), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25 = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) 
assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128, ), (1, )) assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (256, ), (1, )) assert_size_stride(primals_20, (65, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_21, (65, ), (1, )) assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (256, ), (1, )) assert_size_stride(primals_24, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_25, (256, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_4, buf0, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_4 buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_6, buf1, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_6 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(primals_8, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_8 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_10, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_12, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_12 buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_14, buf5, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_14 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_16, buf6, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_16 buf7 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_18, buf7, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_18 buf8 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_22, buf8, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_22 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_2, buf10, 256, 4096, grid=grid(256, 4096), stream=stream0) del buf9 del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf12, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_6.run(buf12, buf13, buf14, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf13, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf16, primals_7, 262144, grid=grid(262144), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_7.run(buf18, primals_9, 262144, grid=grid(262144), stream=stream0) del primals_9 buf19 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf20 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf18, buf19, buf20, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf22 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf22, primals_11, 131072, grid=grid(131072), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf24 = buf23; del buf23 # reuse # Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf24, primals_13, 131072, grid=grid(131072), stream=stream0) del primals_13 buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) # Topologically Sorted Source Nodes: [x_8], Original ATen: 
[aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_10.run(buf24, buf25, buf26, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf28 = buf27; del buf27 # reuse # Topologically Sorted Source Nodes: [conv2d_6, x_9], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf28, primals_15, 32768, grid=grid(32768), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf30 = buf29; del buf29 # reuse # Topologically Sorted Source Nodes: [conv2d_7, x_10], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf30, primals_17, 32768, grid=grid(32768), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf32 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [conv2d_8, cPa], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf32, primals_19, 65536, grid=grid(65536), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [semi], Original ATen: [aten.convolution] buf33 = extern_kernels.convolution(buf32, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 65, 8, 8), (4160, 1, 520, 65)) buf34 = empty_strided_cuda((4, 65, 8, 8), (4160, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [semi], Original ATen: [aten.convolution] triton_poi_fused_convolution_13.run(buf33, primals_21, buf34, 260, 64, grid=grid(260, 64), stream=stream0) del buf33 del primals_21 # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf30, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf36 = buf35; del buf35 # reuse # Topologically Sorted Source Nodes: [conv2d_10, cDa], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf36, primals_23, 65536, grid=grid(65536), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [desc], Original ATen: [aten.convolution] buf37 = extern_kernels.convolution(buf36, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf38 = buf37; del buf37 # reuse buf39 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32) buf40 = buf39; del buf39 # reuse # Topologically Sorted Source Nodes: [desc, dn], Original ATen: [aten.convolution, aten.linalg_vector_norm] 
triton_per_fused_convolution_linalg_vector_norm_14.run(buf38, buf40, primals_25, 256, 256, grid=grid(256), stream=stream0) del primals_25 buf41 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [desc_1], Original ATen: [aten.div] triton_poi_fused_div_15.run(buf38, buf40, buf41, 1024, 64, grid=grid(1024, 64), stream=stream0) return (buf34, buf41, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, primals_20, buf8, primals_24, buf10, buf12, buf13, buf14, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf36, buf38, reinterpret_tensor(buf40, (4, 1, 8, 8), (64, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((65, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((65, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
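A minimal timing sketch (an assumption layered on the harness above, not part of the generated file): call() empties the argument list it receives via args.clear(), so each run needs a fresh list, and CUDA launches are asynchronous, so elapsed time is best measured with events.

import torch

def time_call_once(inputs):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    out = call(list(inputs))   # copy the list: call() clears whatever it is handed
    end.record()
    torch.cuda.synchronize()
    return out, start.elapsed_time(end)  # milliseconds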
import torch import torch.optim import torch.utils.data class SuperPointNet(torch.nn.Module): """ Pytorch definition of SuperPoint Network. """ def __init__(self): super(SuperPointNet, self).__init__() self.relu = torch.nn.ReLU(inplace=True) self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2) c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256 self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1 ) self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0) def forward(self, x): """ Forward pass that jointly computes unprocessed point and descriptor tensors. Input x: Image pytorch tensor shaped N x 1 x H x W. Output semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8. desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8. """ x = self.relu(self.conv1a(x)) x = self.relu(self.conv1b(x)) x = self.pool(x) x = self.relu(self.conv2a(x)) x = self.relu(self.conv2b(x)) x = self.pool(x) x = self.relu(self.conv3a(x)) x = self.relu(self.conv3b(x)) x = self.pool(x) x = self.relu(self.conv4a(x)) x = self.relu(self.conv4b(x)) cPa = self.relu(self.convPa(x)) semi = self.convPb(cPa) cDa = self.relu(self.convDa(x)) desc = self.convDb(cDa) dn = torch.norm(desc, p=2, dim=1) desc = desc.div(torch.unsqueeze(dn, 1)) return semi, desc def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
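The fused norm/div kernels in the compiled graph above implement the two-line normalization at the end of forward(). A small illustration with hypothetical values: every spatial location's 256-dim descriptor is scaled to unit L2 norm (note that the eager code, unlike F.normalize, applies no epsilon clamp, and neither does the fused kernel).

import torch

desc = torch.randn(4, 256, 8, 8)            # same shape as the network's desc
dn = torch.norm(desc, p=2, dim=1)           # (4, 8, 8) per-pixel norms
unit = desc / dn.unsqueeze(1)               # broadcast the norm over channels
assert torch.allclose(unit.norm(p=2, dim=1), torch.ones(4, 8, 8), atol=1e-5)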
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 16 x2 = xindex // 1024 
x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 8 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 260 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < 
ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 65 y1 = yindex // 65 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 65 * x2 + 4160 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused_convolution_linalg_vector_norm_14(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = libdevice.sqrt(tmp6) tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp7, None) @triton.jit def triton_poi_fused_div_15(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 16384 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 64 * y1), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (65, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_21, (65,), (1,)) assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_25, (256,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 9)](primals_4, buf0, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_0[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_0[grid(4096, 9)](primals_8, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_1[grid(8192, 9)](primals_10, buf3, 8192, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf7 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_18, buf7, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf8 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_22, buf8, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf9 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_convolution_relu_4[grid(256, 4096)](buf9, primals_2, buf10, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf9 del primals_2 buf11 = extern_kernels.convolution(buf10, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_5[grid(1048576)](buf12, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf12, buf13, buf14, 262144, XBLOCK=1024, num_warps=4, num_stages=1) buf15 = extern_kernels.convolution(buf13, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_7[grid(262144)](buf16, primals_7, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_7[grid(262144)](buf18, primals_9, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf19 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf20 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(65536)](buf18, buf19, buf20, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf22 = buf21 del buf21 triton_poi_fused_convolution_relu_9[grid(131072)](buf22, primals_11, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf24 = buf23 del buf23 triton_poi_fused_convolution_relu_9[grid(131072)](buf24, primals_13, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(32768)](buf24, buf25, buf26, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf28 = buf27 del buf27 triton_poi_fused_convolution_relu_11[grid(32768)](buf28, primals_15, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf30 = buf29 del buf29 triton_poi_fused_convolution_relu_11[grid(32768)](buf30, primals_17, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf32 = buf31 del buf31 triton_poi_fused_convolution_relu_12[grid(65536)](buf32, primals_19, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_19 buf33 = extern_kernels.convolution(buf32, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 65, 8, 8), (4160, 1, 520, 65)) buf34 = empty_strided_cuda((4, 65, 8, 8), (4160, 64, 8, 1), torch. 
float32) triton_poi_fused_convolution_13[grid(260, 64)](buf33, primals_21, buf34, 260, 64, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1) del buf33 del primals_21 buf35 = extern_kernels.convolution(buf30, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf36 = buf35 del buf35 triton_poi_fused_convolution_relu_12[grid(65536)](buf36, primals_23, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_23 buf37 = extern_kernels.convolution(buf36, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf38 = buf37 del buf37 buf39 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32) buf40 = buf39 del buf39 triton_per_fused_convolution_linalg_vector_norm_14[grid(256)](buf38, buf40, primals_25, 256, 256, num_warps=2, num_stages=1) del primals_25 buf41 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_div_15[grid(1024, 64)](buf38, buf40, buf41, 1024, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) return (buf34, buf41, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, primals_20, buf8, primals_24, buf10, buf12, buf13, buf14, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf36, buf38, reinterpret_tensor(buf40, (4, 1, 8, 8), (64, 64, 8, 1), 0)) class SuperPointNetNew(torch.nn.Module): """ Pytorch definition of SuperPoint Network. """ def __init__(self): super(SuperPointNetNew, self).__init__() self.relu = torch.nn.ReLU(inplace=True) self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2) c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256 self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1 ) self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1a.weight primals_2 = self.conv1a.bias primals_4 = self.conv1b.weight primals_5 = self.conv1b.bias primals_6 = self.conv2a.weight primals_7 = self.conv2a.bias primals_8 = self.conv2b.weight primals_9 = self.conv2b.bias primals_10 = self.conv3a.weight primals_11 = self.conv3a.bias primals_12 = self.conv3b.weight primals_13 = self.conv3b.bias primals_14 = self.conv4a.weight primals_15 = self.conv4a.bias primals_16 = self.conv4b.weight primals_17 = self.conv4b.bias primals_18 = self.convPa.weight primals_19 = self.convPa.bias primals_20 = self.convPb.weight primals_21 = self.convPb.bias primals_22 = self.convDa.weight primals_23 = self.convDa.bias primals_24 = self.convDb.weight primals_25 = self.convDb.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0], output[1]
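A side observation, sketched under the assumption that only layout changes: the triton_poi_fused_0 through triton_poi_fused_3 kernels above are pure permutation copies that move the conv weights from contiguous NCHW strides such as (576, 9, 3, 1) to the channels-last strides (576, 1, 192, 64) that the convolutions then consume, the same transform PyTorch exposes as a memory-format conversion.

import torch

w = torch.randn(64, 64, 3, 3)                    # strides (576, 9, 3, 1)
w_cl = w.to(memory_format=torch.channels_last)   # strides (576, 1, 192, 64)
assert w_cl.stride() == (576, 1, 192, 64)
assert torch.equal(w, w_cl)                      # same values, new layout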
repo_name: Sunny-Qin-0314/pytorch-superpoint
module_name: SuperPointNet
synthetic: false
uuid: 1134
licenses: ["MIT"]
stars: 0
sha: 5c5325a1e5917afcc7469e137206990a8cd33725
repo_link: https://github.com/Sunny-Qin-0314/pytorch-superpoint/tree/5c5325a1e5917afcc7469e137206990a8cd33725
import torch import torch.optim import torch.utils.data class Model(torch.nn.Module): """ Pytorch definition of SuperPoint Network. """ def __init__(self): super().__init__() self.relu = torch.nn.ReLU(inplace=True) self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2) c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256 self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1 ) self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0) def forward(self, x): """ Forward pass that jointly computes unprocessed point and descriptor tensors. Input x: Image pytorch tensor shaped N x 1 x H x W. Output semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8. desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8. """ x = self.relu(self.conv1a(x)) x = self.relu(self.conv1b(x)) x = self.pool(x) x = self.relu(self.conv2a(x)) x = self.relu(self.conv2b(x)) x = self.pool(x) x = self.relu(self.conv3a(x)) x = self.relu(self.conv3b(x)) x = self.pool(x) x = self.relu(self.conv4a(x)) x = self.relu(self.conv4b(x)) cPa = self.relu(self.convPa(x)) semi = self.convPb(cPa) cDa = self.relu(self.convDa(x)) desc = self.convDb(cDa) dn = torch.norm(desc, p=2, dim=1) desc = desc.div(torch.unsqueeze(dn, 1)) return semi, desc def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return []
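A minimal usage sketch for the Model above (shapes follow the docstring: the 64x64 input from get_inputs() yields H/8 x W/8 = 8x8 output maps):

import torch

model = Model().eval()
with torch.no_grad():
    semi, desc = model(torch.rand(4, 1, 64, 64))
print(semi.shape)   # torch.Size([4, 65, 8, 8])
print(desc.shape)   # torch.Size([4, 256, 8, 8])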
entry_point: ArcMarginProduct_subcenter
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 
= tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/2s/c2s7vvaz25xjuqqa7ymam5rco6ey7ycspmykfxm67txox57kysz2.py # Topologically Sorted Source Nodes: [normalize_1], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize_1 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/o7/co7bzw6oskv7grmzjusyhgf3movzeocopdg3tnslwlwhxsiewsyw.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem, max_1 # Graph fragment: # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 2), kwargs = {}) # 
%getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {}) triton_poi_fused_max_2 = async_compile.triton('triton_poi_fused_max_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (3*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (3*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 > tmp1 tmp6 = tmp0 == tmp1 tmp7 = tmp0 != tmp0 tmp8 = tmp1 != tmp1 tmp9 = tmp7 > tmp8 tmp10 = tmp5 | tmp9 tmp11 = tmp7 & tmp8 tmp12 = tmp6 | tmp11 tmp13 = tl.full([1], 0, tl.int64) tmp14 = tl.full([1], 1, tl.int64) tmp15 = tmp13 < tmp14 tmp16 = tmp12 & tmp15 tmp17 = tmp10 | tmp16 tmp18 = tl.where(tmp17, tmp0, tmp1) tmp19 = tl.where(tmp17, tmp13, tmp14) tmp20 = tmp18 > tmp3 tmp21 = tmp18 == tmp3 tmp22 = tmp18 != tmp18 tmp23 = tmp3 != tmp3 tmp24 = tmp22 > tmp23 tmp25 = tmp20 | tmp24 tmp26 = tmp22 & tmp23 tmp27 = tmp21 | tmp26 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp19 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tmp25 | tmp30 tmp32 = tl.where(tmp31, tmp18, tmp3) tmp33 = tl.where(tmp31, tmp19, tmp28) tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp33, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[normalize_1], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_2, buf1, 48, grid=grid(48), stream=stream0) buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [cosine_all], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 12), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.int64) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] triton_poi_fused_max_2.run(buf2, buf3, buf4, 256, grid=grid(256), stream=stream0) del buf2 return (buf3, primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torch.nn.parallel class ArcMarginProduct_subcenter(nn.Module): def __init__(self, in_features, out_features, k=3): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features * k, in_features)) self.reset_parameters() self.k = k self.out_features = out_features def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, features): cosine_all = F.linear(F.normalize(features), F.normalize(self.weight)) cosine_all = cosine_all.view(-1, self.out_features, self.k) cosine, _ = torch.max(cosine_all, dim=2) return cosine def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_max_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 3 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 > tmp1 tmp6 = tmp0 == tmp1 tmp7 = tmp0 != tmp0 tmp8 = tmp1 != tmp1 tmp9 = tmp7 > tmp8 tmp10 = tmp5 | tmp9 tmp11 = tmp7 & tmp8 tmp12 = tmp6 | tmp11 tmp13 = tl.full([1], 0, tl.int64) tmp14 = tl.full([1], 1, tl.int64) tmp15 = tmp13 < tmp14 tmp16 = tmp12 & tmp15 tmp17 = tmp10 | tmp16 tmp18 = tl.where(tmp17, tmp0, tmp1) tmp19 = tl.where(tmp17, tmp13, tmp14) tmp20 = tmp18 > tmp3 tmp21 = tmp18 == tmp3 tmp22 = tmp18 != tmp18 tmp23 = tmp3 != tmp3 tmp24 = tmp22 > tmp23 tmp25 = tmp20 | tmp24 tmp26 = tmp22 & tmp23 tmp27 = tmp21 | tmp26 tmp28 = tl.full([1], 2, tl.int64) tmp29 = tmp19 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tmp25 | tmp30 tl.where(tmp31, tmp18, tmp3) tmp33 = tl.where(tmp31, tmp19, tmp28) tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp33, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((12, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(48)](primals_2, buf1, 48, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 12), (1, 4), 0), out=buf2) del buf1 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.int64) triton_poi_fused_max_2[grid(256)](buf2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 1), 0) class ArcMarginProduct_subcenterNew(nn.Module): def __init__(self, in_features, out_features, k=3): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features * k, in_features)) self.reset_parameters() self.k = k self.out_features = out_features def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution
ArcMarginProduct_subcenter
false
1135
[ "Apache-2.0" ]
0
8e2d9056d5c88c6415827086809e73522b336fbb
https://github.com/Tanmengxuan/Google-Landmark-Recognition-2020-3rd-Place-Solution/tree/8e2d9056d5c88c6415827086809e73522b336fbb
import math import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torch.nn.parallel class Model(nn.Module): def __init__(self, in_features, out_features, k=3): super().__init__() self.weight = nn.Parameter(torch.FloatTensor(out_features * k, in_features)) self.reset_parameters() self.k = k self.out_features = out_features def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) def forward(self, features): cosine_all = F.linear(F.normalize(features), F.normalize(self.weight)) cosine_all = cosine_all.view(-1, self.out_features, self.k) cosine, _ = torch.max(cosine_all, dim=2) return cosine def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
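A minimal eager-mode sketch (not part of the record) of the sub-center max reduction that the fused kernels above implement, assuming the toy shapes from get_inputs()/get_init_inputs() (in_features=4, out_features=4, k=3): import torch import torch.nn.functional as F features = torch.rand(4, 4, 4, 4) weight = torch.rand(4 * 3, 4) # (out_features * k, in_features) # F.normalize defaults to dim=1; the linear layer then yields one score per # normalized weight row, i.e. k scores per class. cosine_all = F.linear(F.normalize(features), F.normalize(weight)) cosine_all = cosine_all.view(-1, 4, 3) # (batch, out_features, k) cosine, _ = cosine_all.max(dim=2) # keep the best sub-center per class print(cosine.shape) # torch.Size([64, 4])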
ChannelMixer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nh/cnhx37tsffx4r7taj3xi72s7yfpnnccem24fupfbht6b7bzliavu.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_1 => add, erf, mul, mul_1, mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mz/cmz3wjq2uutgv7zzhrquuijmcstklp4wvd4q2ptdi3fpwbjqcpo6.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.add] # Source node to ATen node mapping: # x_3 => add_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del 
primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu] stream0 = get_raw_stream(0) triton_poi_fused_gelu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf3, primals_5, primals_1, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ChannelMixer(nn.Module): def __init__(self, input_size, hidden_size, dropout=None): super(ChannelMixer, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, input_size) self.dropout = None if dropout is not None: self.dropout = nn.Dropout(dropout) self.activation = nn.GELU() def forward(self, x): input = x x = self.fc1(x) x = self.activation(x) if self.dropout is not None: x = self.dropout(x) x = self.fc2(x) if self.dropout is not None: x = self.dropout(x) x = x + input return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf3, primals_5, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 class ChannelMixerNew(nn.Module): def __init__(self, input_size, hidden_size, dropout=None): super(ChannelMixerNew, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, input_size) self.dropout = None if dropout is not None: self.dropout = nn.Dropout(dropout) self.activation = nn.GELU() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
TheRealMarVin/mlp-mixer
ChannelMixer
false
1136
[ "MIT" ]
0
2124cb5c5adfc7af473cab535095471d4943adab
https://github.com/TheRealMarVin/mlp-mixer/tree/2124cb5c5adfc7af473cab535095471d4943adab
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_size, hidden_size, dropout=None): super().__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, input_size) self.dropout = None if dropout is not None: self.dropout = nn.Dropout(dropout) self.activation = nn.GELU() def forward(self, x): input = x x = self.fc1(x) x = self.activation(x) if self.dropout is not None: x = self.dropout(x) x = self.fc2(x) if self.dropout is not None: x = self.dropout(x) x = x + input return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
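A quick check (an illustrative sketch, assuming PyTorch's default exact GELU, approximate='none') that the erf-based formula in triton_poi_fused_gelu_0 matches torch.nn.functional.gelu: import torch import torch.nn.functional as F x = torch.randn(256) # The fused kernel computes 0.5 * x * (1 + erf(x / sqrt(2))). manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476)) assert torch.allclose(manual, F.gelu(x), atol=1e-6)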
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (128, 4), (4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), out=buf0) del primals_3 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F class Net(nn.Module): def __init__(self, obs_dim, act_dim): super(Net, self).__init__() self.fc0 = nn.Linear(obs_dim, 128) self.fc1 = nn.Linear(128, act_dim) def forward(self, x): x = x.type_as(self.fc0.bias) x = F.relu(self.fc0(x)) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'obs_dim': 4, 'act_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (128, 4), (4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 128), (1, 4), 0), out=buf0) del primals_3 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf3, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_4, buf3 class NetNew(nn.Module): def __init__(self, obs_dim, act_dim): super(NetNew, self).__init__() self.fc0 = nn.Linear(obs_dim, 128) self.fc1 = nn.Linear(128, act_dim) def forward(self, input_0): primals_3 = self.fc0.weight primals_2 = self.fc0.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
TommeyChang/CS294-Homework
Net
false
1,137
[ "MIT" ]
0
17b525bf4366034b45c4febd89f1053d44550237
https://github.com/TommeyChang/CS294-Homework/tree/17b525bf4366034b45c4febd89f1053d44550237
import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, obs_dim, act_dim): super().__init__() self.fc0 = nn.Linear(obs_dim, 128) self.fc1 = nn.Linear(128, act_dim) def forward(self, x): x = x.type_as(self.fc0.bias) x = F.relu(self.fc0(x)) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
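For context, a small eager sketch of what triton_poi_fused_relu_threshold_backward_0 fuses: the bias add, the ReLU, and the boolean (activation <= 0) mask that autograd's threshold_backward reuses (toy shapes only, not the record's exact buffers): import torch x = torch.randn(64, 128) # stand-in for the matmul output buffer bias = torch.randn(128) act = torch.relu(x + bias) # written back in place of the matmul output mask = act <= 0 # saved for the backward pass print(act.shape, mask.dtype) # torch.Size([64, 128]) torch.bool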
ActorDownAction
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/cz/cczdmlbbmfz5zwvgavqgsp7p2chtvjm2zzbxhjk7w5jaagtfot3j.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%tanh, %tanh_1], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = libdevice.tanh(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tmp10 = tl.full([1], 8, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = libdevice.tanh(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/m6/cm6ozsdmt5vl54fxwk7cgktzswysgn2c37vsaybpucplzehkrnnz.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/64/c64g5uxk2a5hbzuhd6oikla2gb5eyfjjb6kbh7btzswha52gl5ex.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = 
call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4h/c4h6r6vefoeuinm5eqv2d6wqmfj2mnjacalp633y3m6bnseb2bnk.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # x_1 => relu_1 # x_2 => view_4 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {}) triton_poi_fused_relu_view_3 = async_compile.triton('triton_poi_fused_relu_view_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = (xindex // 300) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/yl/cyl3pepuyms7had2pbaeyfxi5l2wn7v5miawhd67mloipgsmgxwf.py # Topologically Sorted Source Nodes: [tanh_1, action], Original ATen: [aten.tanh, aten.mul] # Source node to ATen node mapping: # action => mul # tanh_1 => tanh_2 # Graph fragment: # %tanh_2 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, 4), kwargs = {}) triton_poi_fused_mul_tanh_4 = async_compile.triton('triton_poi_fused_mul_tanh_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), 
tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tc/ctcyhc3j7gcndovmump6plsamm3m4336gv7k75wru64k3klwdqb5.py # Topologically Sorted Source Nodes: [msg_down], Original ATen: [aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # msg_down => div, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_11, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, %expand), kwargs = {}) triton_per_fused_div_linalg_vector_norm_5 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = 1e-12 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp0 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr0 + (r1 + (16*x0)), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, 
(4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400, ), (1, )) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300, ), (1, )) assert_size_stride(primals_7, (4, 300), (300, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (400, 8), (8, 1)) assert_size_stride(primals_10, (400, ), (1, )) assert_size_stride(primals_11, (300, 400), (400, 1)) assert_size_stride(primals_12, (300, ), (1, )) assert_size_stride(primals_13, (16, 300), (300, 1)) assert_size_stride(primals_14, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf1 # reuse buf20 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_4, buf20, 25600, grid=grid(25600), stream=stream0) del primals_4 buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0), reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf3, primals_6, buf4, buf19, 19200, grid=grid(19200), stream=stream0) del primals_6 buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_3.run(buf4, buf5, 19200, grid=grid(19200), stream=stream0) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh_1, action], Original ATen: [aten.tanh, aten.mul] triton_poi_fused_mul_tanh_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8) del primals_9 buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf8 # reuse buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf9, primals_10, buf18, 
25600, grid=grid(25600), stream=stream0) del primals_10 buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0), reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10) buf11 = buf4; del buf4 # reuse buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf10, primals_12, buf11, buf17, 19200, grid=grid(19200), stream=stream0) del primals_12 buf12 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_3.run(buf11, buf12, 19200, grid=grid(19200), stream=stream0) del buf11 buf13 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_14, buf12, reinterpret_tensor(primals_13, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf13) del primals_14 buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf15 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf14 # reuse buf16 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [msg_down], Original ATen: [aten.linalg_vector_norm, aten.div] triton_per_fused_div_linalg_vector_norm_5.run(buf15, buf13, buf16, 64, 16, grid=grid(64), stream=stream0) return (buf7, buf16, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, buf6, reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, buf13, buf15, primals_13, buf17, primals_11, buf18, primals_7, buf19, primals_5, buf20, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((400, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((16, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class MLPBase(nn.Module): def __init__(self, num_inputs, num_outputs): super(MLPBase, self).__init__() self.l1 = nn.Linear(num_inputs, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, num_outputs) def forward(self, inputs): x = F.relu(self.l1(inputs)) x = F.relu(self.l2(x)) x = self.l3(x) return x class ActorDownAction(nn.Module): """a top-down module used in bothway message passing that passes messages to children and outputs action""" def __init__(self, self_input_dim, action_dim, msg_dim, max_action, max_children): super(ActorDownAction, self).__init__() self.max_action = max_action self.action_base = MLPBase(self_input_dim + msg_dim, action_dim) self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim * max_children) def forward(self, x, m): xm = torch.cat((x, m), dim=-1) xm = torch.tanh(xm) action = self.max_action * torch.tanh(self.action_base(xm)) msg_down = self.msg_base(xm) msg_down = F.normalize(msg_down, dim=-1) return action, msg_down def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4, 'max_action': 4, 'max_children': 4}]
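The get_inputs/get_init_inputs helpers above encode this dataset's calling convention: a list of positional constructor args plus a dict of keyword args, and a list of forward-pass tensors. A minimal sketch of how a harness would presumably consume them (the harness itself is an assumption, not part of this row):

# Hypothetical harness: build the module from get_init_inputs() and run
# it on get_inputs(). Assumes the ActorDownAction definition above.
import torch

args, kwargs = get_init_inputs()
model = ActorDownAction(*args, **kwargs)
action, msg_down = model(*get_inputs())
# action is squashed by tanh and scaled, so it is bounded by max_action;
# msg_down is F.normalize'd, so each trailing-dim vector has ~unit norm.
assert action.abs().max() <= kwargs['max_action']
assert torch.allclose(msg_down.norm(dim=-1), torch.ones(4, 4, 4), atol=1e-5)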
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = libdevice.tanh(tmp5) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp4, tmp6, tmp7) tmp9 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp12 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = libdevice.tanh(tmp12) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp9, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp8, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 400 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = xindex // 300 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_tanh_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp2 = 4.0 tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_per_fused_div_linalg_vector_norm_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp7 = 1e-12 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp0 / tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (400, 8), (8, 1)) assert_size_stride(primals_4, (400,), (1,)) assert_size_stride(primals_5, (300, 400), (400, 1)) assert_size_stride(primals_6, (300,), (1,)) assert_size_stride(primals_7, (4, 300), (300, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (400, 8), (8, 1)) assert_size_stride(primals_10, (400,), (1,)) assert_size_stride(primals_11, (300, 400), (400, 1)) assert_size_stride(primals_12, (300,), (1,)) assert_size_stride(primals_13, (16, 300), (300, 1)) assert_size_stride(primals_14, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 400), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf1 buf20 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf2, primals_4, buf20, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 400), (400, 1), 0), reinterpret_tensor(primals_5, (400, 300), (1, 400), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf19 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf3, primals_6, buf4, buf19, 19200, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_6 buf5 = buf3 del buf3 triton_poi_fused_relu_view_3[grid(19200)](buf4, buf5, 19200, XBLOCK =128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf5, reinterpret_tensor(primals_7, (300, 4), (1, 300), 0), alpha=1, beta=1, out=buf6) del primals_8 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_tanh_4[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((64, 400), (400, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_9, (8, 400), (1, 8), 0), out=buf8) del primals_9 buf9 = reinterpret_tensor(buf8, (4, 4, 4, 400), (6400, 1600, 400, 1), 0 ) del buf8 buf18 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(25600)](buf9, primals_10, buf18, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_10 buf10 = empty_strided_cuda((64, 300), (300, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf9, (64, 400), (400, 1), 0), reinterpret_tensor(primals_11, (400, 300), (1, 400), 0), out=buf10) buf11 = buf4 del buf4 buf17 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(19200)](buf10, primals_12, buf11, buf17, 19200, XBLOCK=256, num_warps=4, num_stages=1) del primals_12 buf12 = buf10 del buf10 triton_poi_fused_relu_view_3[grid(19200)](buf11, buf12, 19200, XBLOCK=128, num_warps=4, num_stages=1) del buf11 buf13 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_14, buf12, reinterpret_tensor( primals_13, (300, 16), (1, 300), 0), alpha=1, beta=1, out=buf13) del primals_14 buf14 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf15 = reinterpret_tensor(buf14, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf14 buf16 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. 
float32) triton_per_fused_div_linalg_vector_norm_5[grid(64)](buf15, buf13, buf16, 64, 16, XBLOCK=32, num_warps=4, num_stages=1) return (buf7, buf16, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(buf2, (64, 400), (400, 1), 0), buf5, buf6, reinterpret_tensor(buf9, (64, 400), (400, 1), 0), buf12, buf13, buf15, primals_13, buf17, primals_11, buf18, primals_7, buf19, primals_5, buf20) class MLPBase(nn.Module): def __init__(self, num_inputs, num_outputs): super(MLPBase, self).__init__() self.l1 = nn.Linear(num_inputs, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, num_outputs) def forward(self, inputs): x = F.relu(self.l1(inputs)) x = F.relu(self.l2(x)) x = self.l3(x) return x class ActorDownActionNew(nn.Module): """a top-down module used in bothway message passing that passes messages to children and outputs action""" def __init__(self, self_input_dim, action_dim, msg_dim, max_action, max_children): super(ActorDownActionNew, self).__init__() self.max_action = max_action self.action_base = MLPBase(self_input_dim + msg_dim, action_dim) self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim * max_children) def forward(self, input_0, input_1): primals_3 = self.action_base.l1.weight primals_4 = self.action_base.l1.bias primals_5 = self.action_base.l2.weight primals_6 = self.action_base.l2.bias primals_7 = self.action_base.l3.weight primals_8 = self.action_base.l3.bias primals_9 = self.msg_base.l1.weight primals_10 = self.msg_base.l1.bias primals_11 = self.msg_base.l2.weight primals_12 = self.msg_base.l2.bias primals_13 = self.msg_base.l3.weight primals_14 = self.msg_base.l3.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0], output[1]
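Since call() pins every buffer to cuda:0, a quick way to sanity-check the compiled module is to mirror the weights of an eager ActorDownAction into ActorDownActionNew and compare outputs. This is an illustrative sketch under the assumption that both class definitions above are importable and a CUDA device is available:

# Hedged equivalence check between the eager and Inductor-compiled paths.
import copy
import torch

eager = ActorDownAction(4, 4, 4, 4, 4).cuda()
compiled = ActorDownActionNew(4, 4, 4, 4, 4).cuda()
compiled.load_state_dict(copy.deepcopy(eager.state_dict()))
x = torch.rand(4, 4, 4, 4, device='cuda')
m = torch.rand(4, 4, 4, 4, device='cuda')
a0, m0 = eager(x, m)
a1, m1 = compiled(x, m)
assert torch.allclose(a0, a1, atol=1e-5)
assert torch.allclose(m0, m1, atol=1e-5)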
TachikakaMin/dreamer-torch
ActorDownAction
false
1138
[ "MIT" ]
0
3c99526f4507e28cf8b34ada0321001adcf8ae1f
https://github.com/TachikakaMin/dreamer-torch/tree/3c99526f4507e28cf8b34ada0321001adcf8ae1f
import torch from torch import nn import torch.nn.functional as F class MLPBase(nn.Module): def __init__(self, num_inputs, num_outputs): super().__init__() self.l1 = nn.Linear(num_inputs, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, num_outputs) def forward(self, inputs): x = F.relu(self.l1(inputs)) x = F.relu(self.l2(x)) x = self.l3(x) return x class Model(nn.Module): """a top-down module used in bothway message passing that passes messages to children and outputs action""" def __init__(self, self_input_dim, action_dim, msg_dim, max_action, max_children): super().__init__() self.max_action = max_action self.action_base = MLPBase(self_input_dim + msg_dim, action_dim) self.msg_base = MLPBase(self_input_dim + msg_dim, msg_dim * max_children) def forward(self, x, m): xm = torch.cat((x, m), dim=-1) xm = torch.tanh(xm) action = self.max_action * torch.tanh(self.action_base(xm)) msg_down = self.msg_base(xm) msg_down = F.normalize(msg_down, dim=-1) return action, msg_down def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'self_input_dim': 4, 'action_dim': 4, 'msg_dim': 4, 'max_action': 4, 'max_children': 4}]
UpsampleConvLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/3g/c3g7vlanljv77gridfmcaf7xvc2k6yt5nwslumydb6jkvzhxp24z.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten._unsafe_index, aten.reflection_pad2d] # Source node to ATen node mapping: # x => _unsafe_index # x_1 => _unsafe_index_1, _unsafe_index_2 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_1, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused__unsafe_index_reflection_pad2d_0 = async_compile.triton('triton_poi_fused__unsafe_index_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x5 = xindex tmp0 = 3 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = 3 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x5), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/42/c42i6kggymcvforsoo45syfc6w3ujwnd3pxalcsjxkelshjyz7gv.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_2, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 25) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten._unsafe_index, aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 400, grid=grid(400), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class UpsampleConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, scale_factor): super(UpsampleConvLayer, self).__init__() self._scale_factor = scale_factor self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2) self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x = nn.functional.interpolate(x, mode='nearest', scale_factor=self. _scale_factor) x = self._reflection_pad(x) x = self._conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1, 'scale_factor': 1.0}]
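The assert_size_stride(buf1, (4, 4, 5, 5), ...) in the compiled code above is plain shape arithmetic: scale_factor=1.0 keeps H=W=4, ReflectionPad2d(kernel_size // 2 = 2) grows that to 8, and a stride-1 conv with kernel 4 yields (8 - 4) + 1 = 5. A small illustrative check on the sample configuration (not part of this row):

# Shape walk-through for the sample inputs:
# interpolate with scale_factor=1.0 keeps H=W=4;
# ReflectionPad2d(4 // 2 = 2) gives 4 + 2*2 = 8;
# Conv2d with kernel 4, stride 1 gives (8 - 4) + 1 = 5.
import torch

layer = UpsampleConvLayer(4, 4, 4, 1, 1.0)
out = layer(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 5, 5)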
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x5 = xindex tmp0 = 3 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = 3 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0)) tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x5, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 25 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_reflection_pad2d_0[grid(1024)](primals_1 , buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class UpsampleConvLayerNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, scale_factor): super(UpsampleConvLayerNew, self).__init__() self._scale_factor = scale_factor self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2) self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, input_0): primals_1 = self._conv.weight primals_3 = self._conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ThomasRanvier/cnn_style_transfer
UpsampleConvLayer
false
1139
[ "MIT" ]
0
90b6c76c20263c22f4e45184d572284726ecbd7b
https://github.com/ThomasRanvier/cnn_style_transfer/tree/90b6c76c20263c22f4e45184d572284726ecbd7b
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, scale_factor): super().__init__() self._scale_factor = scale_factor self._reflection_pad = nn.ReflectionPad2d(kernel_size // 2) self._conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x = nn.functional.interpolate(x, mode='nearest', scale_factor=self. _scale_factor) x = self._reflection_pad(x) x = self._conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1, 'scale_factor': 1.0}]
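With scale_factor=1.0 the nearest-neighbor resample is an identity, which is why the fused index computation above only multiplies source indices by 1.0; the whole layer then reduces to pad-plus-conv. A hedged sketch of that observation, assuming the Model definition above:

# Sketch: at scale_factor=1.0, interpolate is a no-op, so applying the
# reflection pad and conv directly reproduces Model.forward exactly.
import torch

m = Model(4, 4, 4, 1, 1.0)
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(m(x), m._conv(m._reflection_pad(x)))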
StructuralProbe
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/lc/clc2yzg4fyrzlue3igzxk6yznmjxztgiekr3dkkq6awe43hpqv34.py # Topologically Sorted Source Nodes: [diffs, squared_diffs, squared_distances], Original ATen: [aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # diffs => sub # squared_diffs => pow_1 # squared_distances => sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %permute), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {}) triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 
64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x4), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [transformed], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [diffs, squared_diffs, squared_distances], Original ATen: [aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_pow_sub_sum_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) return (buf1, buf0, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data.dataloader class StructuralProbe(nn.Module): """ Computes squared L2 distance after projection by a matrix. For a batch of sentences, computes all n^2 pairs of distances for each sentence in the batch. """ def __init__(self, model_dim, rank, device): super().__init__() self.probe_rank = rank self.model_dim = model_dim self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self. probe_rank)) nn.init.uniform_(self.proj, -0.05, 0.05) self def forward(self, batch): """ Computes all n^2 pairs of distances after projection for each sentence in a batch. Note that due to padding, some distances will be non-zero for pads. Computes (B(h_i-h_j))^T(B(h_i-h_j)) for all i,j Args: batch: a batch of word representations of the shape (batch_size, max_seq_len, representation_dim) Returns: A tensor of distances of shape (batch_size, max_seq_len, max_seq_len) """ transformed = torch.matmul(batch, self.proj) _batchlen, seqlen, _rank = transformed.size() transformed = transformed.unsqueeze(2) transformed = transformed.expand(-1, -1, seqlen, -1) transposed = transformed.transpose(1, 2) diffs = transformed - transposed squared_diffs = diffs.pow(2) squared_distances = torch.sum(squared_diffs, -1) return squared_distances def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'model_dim': 4, 'rank': 4, 'device': 0}]
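The docstring's (B(h_i-h_j))^T(B(h_i-h_j)) is just the squared pairwise Euclidean distance between projected rows, so torch.cdist provides an independent reference. An illustrative sketch (the device argument is accepted but unused by this constructor):

# Sketch: the probe output equals squared pairwise L2 distances of the
# projected batch; compare against torch.cdist as a reference.
import torch

probe = StructuralProbe(4, 4, 0)
batch = torch.rand(4, 4, 4)
proj = batch @ probe.proj
assert torch.allclose(probe(batch), torch.cdist(proj, proj) ** 2, atol=1e-4)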
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data.dataloader assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x4, tmp18, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_sub_sum_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, buf0, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0) class StructuralProbeNew(nn.Module): """ Computes squared L2 distance after projection by a matrix. For a batch of sentences, computes all n^2 pairs of distances for each sentence in the batch. """ def __init__(self, model_dim, rank, device): super().__init__() self.probe_rank = rank self.model_dim = model_dim self.proj = nn.Parameter(data=torch.zeros(self.model_dim, self. probe_rank)) nn.init.uniform_(self.proj, -0.05, 0.05) self def forward(self, input_0): primals_1 = self.proj primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
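The fused kernel can also be exercised directly, which makes its layout assumptions explicit: in_ptr0 plays the role of the projected batch (16 rows of rank 4), and one pass produces the full 4x4x4 distance tensor. A sketch under the assumption that a CUDA device is available (grid is already imported in the block above):

# Direct launch of the fused sub -> pow(2) -> sum(-1) kernel (sketch).
import torch

t = torch.randn(4, 4, 4, device='cuda')    # stands in for buf0
out = torch.empty(4, 4, 4, device='cuda')  # stands in for buf1
triton_poi_fused_pow_sub_sum_0[grid(64)](t, out, 64, XBLOCK=64,
    num_warps=1, num_stages=1)
ref = (t.unsqueeze(2) - t.unsqueeze(1)).pow(2).sum(-1)
assert torch.allclose(out, ref, atol=1e-5)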
TimO96/NLP2
StructuralProbe
false
1,140
[ "MIT" ]
0
83f65a385457f68397c641f38b53df0110282578
https://github.com/TimO96/NLP2/tree/83f65a385457f68397c641f38b53df0110282578
import torch
import torch.nn as nn
import torch.utils.data.dataloader


class Model(nn.Module):
    """
    Computes squared L2 distance after projection by a matrix.

    For a batch of sentences, computes all n^2 pairs of distances
    for each sentence in the batch.
    """

    def __init__(self, model_dim, rank, device):
        super().__init__()
        self.probe_rank = rank
        self.model_dim = model_dim
        self.proj = nn.Parameter(data=torch.zeros(self.model_dim,
            self.probe_rank))
        nn.init.uniform_(self.proj, -0.05, 0.05)

    def forward(self, batch):
        """ Computes all n^2 pairs of distances after projection
        for each sentence in a batch.
        Note that due to padding, some distances will be non-zero for pads.
        Computes (B(h_i-h_j))^T(B(h_i-h_j)) for all i,j
        Args:
          batch: a batch of word representations of the shape
            (batch_size, max_seq_len, representation_dim)
        Returns:
          A tensor of distances of shape (batch_size, max_seq_len, max_seq_len)
        """
        transformed = torch.matmul(batch, self.proj)
        _batchlen, seqlen, _rank = transformed.size()
        transformed = transformed.unsqueeze(2)
        transformed = transformed.expand(-1, -1, seqlen, -1)
        transposed = transformed.transpose(1, 2)
        diffs = transformed - transposed
        squared_diffs = diffs.pow(2)
        squared_distances = torch.sum(squared_diffs, -1)
        return squared_distances


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'model_dim': 4, 'rank': 4, 'device': 0}]