Dataset schema (column name, type, observed value/length range):

  entry_point             string     lengths 1 to 65
  original_triton_code    string     lengths 4.5k to 619k
  python_code             string     lengths 208 to 60.9k
  triton_code             string     lengths 1.15k to 275k
  repo_name               string     lengths 7 to 115
  module_name             string     lengths 1 to 65
  synthetic               bool       1 class
  uuid                    int64      0 to 18.5k
  licenses                sequence   lengths 1 to 6
  stars                   int64      0 to 19.8k
  sha                     string     length 40 (fixed)
  repo_link               string     lengths 72 to 180
  pytorch_code            string     lengths 200 to 4.05k
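Each row below pairs a PyTorch module (pytorch_code / python_code) with its TorchInductor-generated Triton lowering (original_triton_code) and a cleaned standalone version (triton_code). As a usage sketch only: records with this schema can be read with the Hugging Face datasets library; the hub path in the snippet is a placeholder, not this dataset's actual ID.

from datasets import load_dataset

# Placeholder hub path -- substitute the dataset's real ID.
ds = load_dataset("your-org/pytorch-to-triton", split="train")

row = ds[0]
print(row["entry_point"])           # module name, e.g. "ShuffleCatAlt"
print(row["pytorch_code"][:200])    # source PyTorch nn.Module
print(row["triton_code"][:200])     # cleaned Triton translation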
ShuffleCatAlt
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_7/inductor_cache/aw/cawqvw2zvkxbionktisrfw2aqhxd4u3wzzm3vh4bdlbsoeuycdt3.py
# Topologically Sorted Source Nodes: [x, setitem, setitem_1], Original ATen: [aten.zeros, aten.copy]
# Source node to ATen node mapping:
#   setitem => copy
#   setitem_1 => copy_1
#   x => full
# Graph fragment:
#   %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 8, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
#   %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_2, %arg0_1), kwargs = {})
#   %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %copy, 1, 0, 9223372036854775807, 2), kwargs = {})
#   %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_9, %arg1_1), kwargs = {})
#   %slice_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 9223372036854775807, 2), kwargs = {})
triton_poi_fused_copy_zeros_0 = async_compile.triton('triton_poi_fused_copy_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_zeros_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 16) % 8
    x0 = xindex % 16
    x2 = (xindex // 128)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = ((-1) + x1) % 2
    tmp4 = tl.full([1], 0, tl.int64)
    tmp5 = tmp3 == tmp4
    tmp6 = tmp2 & tmp5
    tmp7 = tl.load(in_ptr0 + (x0 + (16*(triton_helpers.div_floor_integer((-1) + x1, 2))) + (64*x2)), tmp6 & xmask, other=0.0)
    tmp8 = ((x3 // 16) % 8) % 2
    tmp9 = tmp8 == tmp4
    tmp10 = tl.load(in_ptr1 + (x0 + (16*(x1 // 2)) + (64*x2)), tmp9 & xmask, other=0.0)
    tmp11 = 0.0
    tmp12 = tl.where(tmp9, tmp10, tmp11)
    tmp13 = tl.where(tmp6, tmp7, tmp12)
    tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x, setitem, setitem_1], Original ATen: [aten.zeros, aten.copy]
        stream0 = get_raw_stream(0)
        triton_poi_fused_copy_zeros_0.run(arg1_1, arg0_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ShuffleCatAlt(nn.Module):

    def forward(self, a, b):
        assert a.size() == b.size()
        n, c, h, w = a.size()
        x = torch.zeros(n, c * 2, h, w, dtype=a.dtype, device=a.device)
        x[:, ::2] = a
        x[:, 1::2] = b
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_copy_zeros_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = (-1 + x1) % 2
    tmp4 = tl.full([1], 0, tl.int64)
    tmp5 = tmp3 == tmp4
    tmp6 = tmp2 & tmp5
    tmp7 = tl.load(in_ptr0 + (x0 + 16 * triton_helpers.div_floor_integer(-1 + x1, 2) + 64 * x2), tmp6 & xmask, other=0.0)
    tmp8 = x3 // 16 % 8 % 2
    tmp9 = tmp8 == tmp4
    tmp10 = tl.load(in_ptr1 + (x0 + 16 * (x1 // 2) + 64 * x2), tmp9 & xmask, other=0.0)
    tmp11 = 0.0
    tmp12 = tl.where(tmp9, tmp10, tmp11)
    tmp13 = tl.where(tmp6, tmp7, tmp12)
    tl.store(out_ptr0 + x3, tmp13, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_copy_zeros_0[grid(512)](arg1_1, arg0_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class ShuffleCatAltNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
AbhinandanVellanki/yolact_edge
ShuffleCatAlt
false
1953
[ "MIT" ]
0
06d6318cf70ef511b19aa1c14f0476e4ffac2722
https://github.com/AbhinandanVellanki/yolact_edge/tree/06d6318cf70ef511b19aa1c14f0476e4ffac2722
import torch
import torch.nn as nn


class Model(nn.Module):

    def forward(self, a, b):
        assert a.size() == b.size()
        n, c, h, w = a.size()
        x = torch.zeros(n, c * 2, h, w, dtype=a.dtype, device=a.device)
        x[:, ::2] = a
        x[:, 1::2] = b
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
ShuffleCat
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_7/inductor_cache/v2/cv2wbrn67x3upvxhrdjbyuxrruoda2nun4vk2i36aflm43yrihqo.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
# Source node to ATen node mapping:
#   x => clone_2
# Graph fragment:
#   %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 128
    x1 = (xindex // 128)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + ((16*x1) + (64*((x0 // 16) % 4)) + (x0 % 16)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 128, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + ((16*x1) + (64*((((-64) + x0) // 16) % 4)) + (((-64) + x0) % 16)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clone_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
        del arg1_1
    return (reinterpret_tensor(buf0, (4, 8, 4, 4), (16, 64, 4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ShuffleCat(nn.Module):

    def forward(self, a, b):
        assert a.size() == b.size()
        n, c, h, w = a.size()
        a = a.permute(0, 2, 3, 1).contiguous().view(-1, c)
        b = b.permute(0, 2, 3, 1).contiguous().view(-1, c)
        x = torch.cat((a, b), dim=0).transpose(1, 0).contiguous()
        x = x.view(c * 2, n, h, w).permute(1, 0, 2, 3)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 128
    x1 = xindex // 128
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 64, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (16 * x1 + 64 * (x0 // 16 % 4) + x0 % 16), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 128, tl.int64)
    tmp9 = tl.load(in_ptr1 + (16 * x1 + 64 * ((-64 + x0) // 16 % 4) + (-64 + x0) % 16), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return reinterpret_tensor(buf0, (4, 8, 4, 4), (16, 64, 4, 1), 0),


class ShuffleCatNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
AbhinandanVellanki/yolact_edge
ShuffleCat
false
1954
[ "MIT" ]
0
06d6318cf70ef511b19aa1c14f0476e4ffac2722
https://github.com/AbhinandanVellanki/yolact_edge/tree/06d6318cf70ef511b19aa1c14f0476e4ffac2722
import torch
import torch.nn as nn


class Model(nn.Module):

    def forward(self, a, b):
        assert a.size() == b.size()
        n, c, h, w = a.size()
        a = a.permute(0, 2, 3, 1).contiguous().view(-1, c)
        b = b.permute(0, 2, 3, 1).contiguous().view(-1, c)
        x = torch.cat((a, b), dim=0).transpose(1, 0).contiguous()
        x = x.view(c * 2, n, h, w).permute(1, 0, 2, 3)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
Cat
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_7/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   x => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 16) % 8
    x0 = xindex % 16
    x2 = (xindex // 128)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Cat(nn.Module):

    def forward(self, a, b):
        x = torch.cat((a, b), dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class CatNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
AbhinandanVellanki/yolact_edge
Cat
false
1955
[ "MIT" ]
0
06d6318cf70ef511b19aa1c14f0476e4ffac2722
https://github.com/AbhinandanVellanki/yolact_edge/tree/06d6318cf70ef511b19aa1c14f0476e4ffac2722
import torch
import torch.nn as nn


class Model(nn.Module):

    def forward(self, a, b):
        x = torch.cat((a, b), dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
ShuffleCatChunk
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_7/inductor_cache/dl/cdlfqj3mg6jwrndr4tkmqp7icd3w2jx3njkmbledpu4xxwhhejsd.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   x => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_4, %getitem_1, %getitem_5, %getitem_2, %getitem_6, %getitem_3, %getitem_7], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 16) % 8
    x0 = xindex % 16
    x2 = (xindex // 128)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + (64*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tmp17 = tl.full([1], 4, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tmp16 & tmp18
    tmp20 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tmp0 >= tmp17
    tmp22 = tl.full([1], 5, tl.int64)
    tmp23 = tmp0 < tmp22
    tmp24 = tmp21 & tmp23
    tmp25 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tmp0 >= tmp22
    tmp27 = tl.full([1], 6, tl.int64)
    tmp28 = tmp0 < tmp27
    tmp29 = tmp26 & tmp28
    tmp30 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
    tmp31 = tmp0 >= tmp27
    tmp32 = tl.full([1], 7, tl.int64)
    tmp33 = tmp0 < tmp32
    tmp34 = tmp31 & tmp33
    tmp35 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
    tmp36 = tmp0 >= tmp32
    tmp37 = tl.full([1], 8, tl.int64)
    tmp38 = tmp0 < tmp37
    tmp39 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
    tmp40 = tl.where(tmp34, tmp35, tmp39)
    tmp41 = tl.where(tmp29, tmp30, tmp40)
    tmp42 = tl.where(tmp24, tmp25, tmp41)
    tmp43 = tl.where(tmp19, tmp20, tmp42)
    tmp44 = tl.where(tmp14, tmp15, tmp43)
    tmp45 = tl.where(tmp9, tmp10, tmp44)
    tmp46 = tl.where(tmp4, tmp5, tmp45)
    tl.store(out_ptr0 + (x3), tmp46, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ShuffleCatChunk(nn.Module):

    def forward(self, a, b):
        assert a.size() == b.size()
        _n, c, _h, _w = a.size()
        a = torch.chunk(a, chunks=c, dim=1)
        b = torch.chunk(b, chunks=c, dim=1)
        x = [None] * (c * 2)
        x[::2] = a
        x[1::2] = b
        x = torch.cat(x, dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 3, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
    tmp16 = tmp0 >= tmp12
    tmp17 = tl.full([1], 4, tl.int64)
    tmp18 = tmp0 < tmp17
    tmp19 = tmp16 & tmp18
    tmp20 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
    tmp21 = tmp0 >= tmp17
    tmp22 = tl.full([1], 5, tl.int64)
    tmp23 = tmp0 < tmp22
    tmp24 = tmp21 & tmp23
    tmp25 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tmp0 >= tmp22
    tmp27 = tl.full([1], 6, tl.int64)
    tmp28 = tmp0 < tmp27
    tmp29 = tmp26 & tmp28
    tmp30 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
    tmp31 = tmp0 >= tmp27
    tmp32 = tl.full([1], 7, tl.int64)
    tmp33 = tmp0 < tmp32
    tmp34 = tmp31 & tmp33
    tmp35 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
    tmp36 = tmp0 >= tmp32
    tl.full([1], 8, tl.int64)
    tmp39 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
    tmp40 = tl.where(tmp34, tmp35, tmp39)
    tmp41 = tl.where(tmp29, tmp30, tmp40)
    tmp42 = tl.where(tmp24, tmp25, tmp41)
    tmp43 = tl.where(tmp19, tmp20, tmp42)
    tmp44 = tl.where(tmp14, tmp15, tmp43)
    tmp45 = tl.where(tmp9, tmp10, tmp44)
    tmp46 = tl.where(tmp4, tmp5, tmp45)
    tl.store(out_ptr0 + x3, tmp46, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class ShuffleCatChunkNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
AbhinandanVellanki/yolact_edge
ShuffleCatChunk
false
1956
[ "MIT" ]
0
06d6318cf70ef511b19aa1c14f0476e4ffac2722
https://github.com/AbhinandanVellanki/yolact_edge/tree/06d6318cf70ef511b19aa1c14f0476e4ffac2722
import torch
import torch.nn as nn


class Model(nn.Module):

    def forward(self, a, b):
        assert a.size() == b.size()
        _n, c, _h, _w = a.size()
        a = torch.chunk(a, chunks=c, dim=1)
        b = torch.chunk(b, chunks=c, dim=1)
        x = [None] * (c * 2)
        x[::2] = a
        x[1::2] = b
        x = torch.cat(x, dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
UpsampleConvLayer
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_7/inductor_cache/wl/cwldpc2k6v7rbizd6tlddleva3alwxblabsherkqjtef5e45djwk.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
#   out => _unsafe_index, _unsafe_index_1
# Graph fragment:
#   %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
#   %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = (xindex // 8) % 8
    x2 = (xindex // 64)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_7/inductor_cache/42/c42i6kggymcvforsoo45syfc6w3ujwnd3pxalcsjxkelshjyz7gv.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   out_1 => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 25) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
        stream0 = get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
        del primals_1
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_3, 400, grid=grid(400), stream=stream0)
        del primals_3
    return (buf2, primals_2, buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.onnx


class UpsampleConvLayer(torch.nn.Module):
    """UpsampleConvLayer
    Upsamples the input and then does a convolution. This method gives better results
    compared to ConvTranspose2d.
    ref: http://distill.pub/2016/deconv-checkerboard/
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super(UpsampleConvLayer, self).__init__()
        self.upsample = upsample
        if upsample:
            self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
        reflection_padding = kernel_size // 2
        self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        x_in = x
        if self.upsample:
            x_in = self.upsample_layer(x_in)
        out = self.reflection_pad(x_in)
        out = self.conv2d(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8 % 8
    x2 = xindex // 64
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 25 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(400)](buf2, primals_3, 400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class UpsampleConvLayerNew(torch.nn.Module):
    """UpsampleConvLayer
    Upsamples the input and then does a convolution. This method gives better results
    compared to ConvTranspose2d.
    ref: http://distill.pub/2016/deconv-checkerboard/
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super(UpsampleConvLayerNew, self).__init__()
        self.upsample = upsample
        if upsample:
            self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
        reflection_padding = kernel_size // 2
        self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, input_0):
        primals_1 = self.conv2d.weight
        primals_3 = self.conv2d.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Ali-ry/azureml-examples
UpsampleConvLayer
false
1957
[ "MIT" ]
0
817ae89d2766dcafd70937a22cb3a80f100a2906
https://github.com/Ali-ry/azureml-examples/tree/817ae89d2766dcafd70937a22cb3a80f100a2906
import torch
import torch.onnx


class Model(torch.nn.Module):
    """UpsampleConvLayer
    Upsamples the input and then does a convolution. This method gives better results
    compared to ConvTranspose2d.
    ref: http://distill.pub/2016/deconv-checkerboard/
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super().__init__()
        self.upsample = upsample
        if upsample:
            self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
        reflection_padding = kernel_size // 2
        self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
        self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        x_in = x
        if self.upsample:
            x_in = self.upsample_layer(x_in)
        out = self.reflection_pad(x_in)
        out = self.conv2d(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1}]
BehlerAngular
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_7/inductor_cache/u6/cu6io2odujubs4jc63atgai7gg2fi2mzfw74zg77lxaq5wh3dsrx.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = (xindex // 2)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = 1.0
    tmp7 = tmp6 - tmp5
    tmp8 = tmp7 * tmp6
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp4, tmp8, tmp9)
    tmp11 = tmp0 >= tmp3
    tmp12 = tl.full([1], 2, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tl.load(in_ptr0 + (x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp14 + tmp6
    tmp16 = tmp15 * tmp6
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp11, tmp16, tmp17)
    tmp19 = tl.where(tmp4, tmp10, tmp18)
    tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn as nn


class BehlerAngular(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    Args:
        zetas (set of int): Set of exponents used to compute angular Behler term (default={1})
    """

    def __init__(self, zetas={1}):
        super(BehlerAngular, self).__init__()
        self.zetas = zetas

    def forward(self, cos_theta):
        """
        Args:
            cos_theta (torch.Tensor): Cosines between all pairs of neighbors of the central atom.

        Returns:
            torch.Tensor: Tensor containing values of the angular filters.
        """
        angular_pos = [(2 ** (1 - zeta) * ((1.0 - cos_theta) ** zeta).unsqueeze(-1)) for zeta in self.zetas]
        angular_neg = [(2 ** (1 - zeta) * ((1.0 + cos_theta) ** zeta).unsqueeze(-1)) for zeta in self.zetas]
        angular_all = angular_pos + angular_neg
        return torch.cat(angular_all, -1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = 1.0
    tmp7 = tmp6 - tmp5
    tmp8 = tmp7 * tmp6
    tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
    tmp10 = tl.where(tmp4, tmp8, tmp9)
    tmp11 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)
    tmp14 = tl.load(in_ptr0 + x1, tmp11 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp14 + tmp6
    tmp16 = tmp15 * tmp6
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp11, tmp16, tmp17)
    tmp19 = tl.where(tmp4, tmp10, tmp18)
    tl.store(out_ptr0 + x2, tmp19, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class BehlerAngularNew(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    Args:
        zetas (set of int): Set of exponents used to compute angular Behler term (default={1})
    """

    def __init__(self, zetas={1}):
        super(BehlerAngularNew, self).__init__()
        self.zetas = zetas

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AntonCh-G/schnetpack
BehlerAngular
false
1958
[ "MIT" ]
0
16f48d59b18415c18c9e324e3c3f9ebb24ce9f0d
https://github.com/AntonCh-G/schnetpack/tree/16f48d59b18415c18c9e324e3c3f9ebb24ce9f0d
import torch
from torch import nn as nn


class Model(nn.Module):
    """
    Compute Behler type angular contribution of the angle spanned by three atoms:

    :math:`2^{(1-\\zeta)} (1 + \\lambda \\cos( {\\theta}_{ijk} ) )^\\zeta`

    Sets of zetas with lambdas of -1 and +1 are generated automatically.

    Args:
        zetas (set of int): Set of exponents used to compute angular Behler term (default={1})
    """

    def __init__(self, zetas={1}):
        super().__init__()
        self.zetas = zetas

    def forward(self, cos_theta):
        """
        Args:
            cos_theta (torch.Tensor): Cosines between all pairs of neighbors of the central atom.

        Returns:
            torch.Tensor: Tensor containing values of the angular filters.
        """
        angular_pos = [(2 ** (1 - zeta) * ((1.0 - cos_theta) ** zeta).unsqueeze(-1)) for zeta in self.zetas]
        angular_neg = [(2 ** (1 - zeta) * ((1.0 + cos_theta) ** zeta).unsqueeze(-1)) for zeta in self.zetas]
        angular_all = angular_pos + angular_neg
        return torch.cat(angular_all, -1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
ScaledLeakyReLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/af/cafbecncoz4yz235zm7ksda376chkgo5sb7kp2dtqqfihowumwo5.py # Topologically Sorted Source Nodes: [out, mul], Original ATen: [aten.leaky_relu, aten.mul] # Source node to ATen node mapping: # mul => mul_1 # out => gt, mul, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %mul), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.4142135623730951), kwargs = {}) triton_poi_fused_leaky_relu_mul_0 = async_compile.triton('triton_poi_fused_leaky_relu_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_leaky_relu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 1.4142135623730951 tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out, mul], Original ATen: [aten.leaky_relu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
from torch import nn
import torch.nn.functional as F


class ScaledLeakyReLU(nn.Module):

    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        out = F.leaky_relu(input, negative_slope=self.negative_slope)
        return out * math.sqrt(2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_leaky_relu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 1.4142135623730951 tmp7 = tmp5 * tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ScaledLeakyReLUNew(nn.Module): def __init__(self, negative_slope=0.2): super().__init__() self.negative_slope = negative_slope def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ArashVahabpour/encoder4editing
ScaledLeakyReLU
false
1,959
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
import math
import torch
from torch import nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        out = F.leaky_relu(input, negative_slope=self.negative_slope)
        return out * math.sqrt(2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
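A quick equivalence check; a sketch assuming the `ScaledLeakyReLU` class from this entry is in scope. The constants match the ones hard-coded in the fused Triton kernel above (slope 0.2, scale 1.4142135623730951 = sqrt(2)):

import math
import torch

module = ScaledLeakyReLU(negative_slope=0.2)
x = torch.randn(4, 4, 4, 4)
# leaky_relu keeps positive values and scales negatives by the slope,
# then the whole result is multiplied by sqrt(2).
expected = torch.where(x > 0, x, 0.2 * x) * math.sqrt(2)
assert torch.allclose(module(x), expected)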
Aggregate
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/wq/cwq3pmymyuowy4xq7gkn2nwrwrq6bpjfz67zohrfhdkptqayuc4m.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.sum] # Source node to ATen node mapping: # y => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [4]), kwargs = {}) triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 
(4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn as nn


class Aggregate(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead of sum pooling.
        keepdim (bool, optional): whether the output tensor has dim retained or not.
    """

    def __init__(self, axis, mean=False, keepdim=True):
        super(Aggregate, self).__init__()
        self.average = mean
        self.axis = axis
        self.keepdim = keepdim

    def forward(self, input, mask=None):
        """Compute layer output.

        Args:
            input (torch.Tensor): input data.
            mask (torch.Tensor, optional): mask to be applied; e.g. neighbors mask.

        Returns:
            torch.Tensor: layer output.
        """
        # NOTE: this overwrite makes the mask argument dead code; the branches
        # guarded by `mask is not None` below can never run, which is why the
        # compiled Triton kernel above reduces to a plain sum.
        mask = None
        if mask is not None:
            input = input * mask[..., None]
        y = torch.sum(input, self.axis)
        if self.average:
            if mask is not None:
                N = torch.sum(mask, self.axis, keepdim=self.keepdim)
                N = torch.max(N, other=torch.ones_like(N))
            else:
                N = input.size(self.axis)
            y = y / N
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'axis': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class AggregateNew(nn.Module): """Pooling layer based on sum or average with optional masking. Args: axis (int): axis along which pooling is done. mean (bool, optional): if True, use average instead for sum pooling. keepdim (bool, optional): whether the output tensor has dim retained or not. """ def __init__(self, axis, mean=False, keepdim=True): super(AggregateNew, self).__init__() self.average = mean self.axis = axis self.keepdim = keepdim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AntonCh-G/schnetpack
Aggregate
false
1,960
[ "MIT" ]
0
16f48d59b18415c18c9e324e3c3f9ebb24ce9f0d
https://github.com/AntonCh-G/schnetpack/tree/16f48d59b18415c18c9e324e3c3f9ebb24ce9f0d
import torch
from torch import nn as nn


class Model(nn.Module):
    """Pooling layer based on sum or average with optional masking.

    Args:
        axis (int): axis along which pooling is done.
        mean (bool, optional): if True, use average instead of sum pooling.
        keepdim (bool, optional): whether the output tensor has dim retained or not.
    """

    def __init__(self, axis, mean=False, keepdim=True):
        super().__init__()
        self.average = mean
        self.axis = axis
        self.keepdim = keepdim

    def forward(self, input, mask=None):
        """Compute layer output.

        Args:
            input (torch.Tensor): input data.
            mask (torch.Tensor, optional): mask to be applied; e.g. neighbors mask.

        Returns:
            torch.Tensor: layer output.
        """
        # NOTE: this overwrite makes the mask argument dead code; the branches
        # guarded by `mask is not None` below can never run, which is why the
        # compiled Triton kernel above reduces to a plain sum.
        mask = None
        if mask is not None:
            input = input * mask[..., None]
        y = torch.sum(input, self.axis)
        if self.average:
            if mask is not None:
                N = torch.sum(mask, self.axis, keepdim=self.keepdim)
                N = torch.max(N, other=torch.ones_like(N))
            else:
                N = input.size(self.axis)
            y = y / N
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [4]
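A short sketch of what the layer computes in practice, assuming the `Aggregate` class from this entry is in scope. Because the mask argument is overwritten (see the note in the source above), `axis=4` reduces to a plain sum over the last dimension, and `mean=True` divides by the size of that axis:

import torch

x = torch.rand(4, 4, 4, 4, 4)
agg_sum = Aggregate(axis=4)
assert torch.allclose(agg_sum(x), x.sum(dim=4))

# With mean=True the sum is divided by x.size(4), i.e. a plain mean.
agg_mean = Aggregate(axis=4, mean=True)
assert torch.allclose(agg_mean(x), x.mean(dim=4))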
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/oh/cohq46gttdcyxdmtlmo72ewca32fmayfkfldbc7vs7dtnr4xjyoy.py # Topologically Sorted Source Nodes: [mean, var, add, sqrt], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt] # Source node to ATen node mapping: # add => add # mean => mean # sqrt => sqrt # var => var # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0, 1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0, 1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-06), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) triton_per_fused_add_mean_sqrt_var_0 = async_compile.triton('triton_per_fused_add_mean_sqrt_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sqrt_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*r1)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = tmp18 / tmp19 tmp22 = 1e-06 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/bx/cbxh7gf7fephxa5jsta2lxxqrtxqcskhc24dijme7fvn5im7kvd2.py # Topologically Sorted Source Nodes: [sub, mul, truediv, x], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] # Source node to ATen node mapping: # mul => mul # sub => sub # truediv => div # x => add_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {}) triton_poi_fused_add_div_mul_sub_1 = async_compile.triton('triton_poi_fused_add_div_mul_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 / tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mean, var, add, sqrt], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt] stream0 = get_raw_stream(0) triton_per_fused_add_mean_sqrt_var_0.run(buf1, buf5, primals_1, 16, 16, grid=grid(16), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, mul, truediv, x], Original ATen: [aten.sub, aten.mul, aten.div, aten.add] triton_poi_fused_add_div_mul_sub_1.run(primals_2, primals_1, buf1, buf5, primals_3, buf6, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf6, primals_1, buf1, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class LayerNorm(nn.Module):

    def __init__(self, d):
        super().__init__()
        self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0))
        self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0))

    def forward(self, x):
        mean = x.mean(dim=(0, 1), keepdim=True)
        var = x.var(dim=(0, 1), keepdim=True, unbiased=False)
        x = self.a * (x - mean) / torch.sqrt(var + 1e-06) + self.b
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = tmp18 / tmp19 tmp22 = 1e-06 tmp23 = tmp21 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp24, xmask) @triton.jit def triton_poi_fused_add_div_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 16 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp6 = tmp4 / tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1)) assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((1, 1, 4, 4), (16, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 buf5 = buf3 del buf3 get_raw_stream(0) triton_per_fused_add_mean_sqrt_var_0[grid(16)](buf1, buf5, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mul_sub_1[grid(256)](primals_2, primals_1, buf1, buf5, primals_3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf6, primals_1, buf1, buf5 class LayerNormNew(nn.Module): def __init__(self, d): super().__init__() self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0)) self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0)) def forward(self, input_0): primals_2 = self.a primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AntoBcc/benchmarking-gnns
LayerNorm
false
1,961
[ "MIT" ]
0
c5750054b2f4ba0822f203fa18d382f6a3b16542
https://github.com/AntoBcc/benchmarking-gnns/tree/c5750054b2f4ba0822f203fa18d382f6a3b16542
import torch
import torch.utils.data
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, d):
        super().__init__()
        self.a = nn.Parameter(torch.ones(d).unsqueeze(0).unsqueeze(0))
        self.b = nn.Parameter(torch.zeros(d).unsqueeze(0).unsqueeze(0))

    def forward(self, x):
        mean = x.mean(dim=(0, 1), keepdim=True)
        var = x.var(dim=(0, 1), keepdim=True, unbiased=False)
        x = self.a * (x - mean) / torch.sqrt(var + 1e-06) + self.b
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
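A worked check, assuming the `LayerNorm` class from this entry is in scope. Note that it normalizes over dims (0, 1), i.e. across the batch, rather than per sample as in `torch.nn.LayerNorm`; at initialization `a == 1` and `b == 0`, so the output is just the standardized input:

import torch

ln = LayerNorm(d=4)
x = torch.rand(4, 4, 4, 4)
mean = x.mean(dim=(0, 1), keepdim=True)
var = x.var(dim=(0, 1), keepdim=True, unbiased=False)
# With the freshly initialized affine parameters this is exact.
expected = (x - mean) / torch.sqrt(var + 1e-06)
assert torch.allclose(ln(x), expected)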
EgoAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/pk/cpk5n3igge6w4dypc2un5gad324fghftzwfdytoko35prykob5rm.py # Topologically Sorted Source Nodes: [input_all], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_all => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) % 2 x0 = xindex % 64 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 
(64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (64*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/dw/cdwnta4op3j5o5gpvwxq4g5ehtysablbdwmrhp2xr424jkdvzuqq.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 2], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 64 y1 = (yindex // 64) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (64*x2) + (128*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (2*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2b/c2bsssxh3o2n7ut56wpjw35pnbyaxf2zcjh6ya5mujjdgn2ezcvf.py # Topologically Sorted Source Nodes: [wrapped_sqrt, p_attn], Original ATen: [aten.sqrt, aten._softmax] # Source node to ATen node mapping: # p_attn => div_1, exp, sum_1 # wrapped_sqrt => full_default # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 4.0), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False}) # %ge_scalar : 
[num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%full_default, 0), kwargs = {}) # %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, %where_self), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %full_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_sqrt_2 = async_compile.triton('triton_poi_fused__softmax_sqrt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp8 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 4.0, tl.float64) tmp2 = tl.full([1], 0.0, tl.float64) tmp3 = tmp1 >= tmp2 tmp4 = 1.0 tmp5 = -1.0 tmp6 = tl.where(tmp3, tmp4, tmp5) tmp7 = tmp0 * tmp6 tmp9 = tmp8 * tmp6 tmp11 = tmp10 * tmp6 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp13 = tmp7 - 
tmp12 tmp14 = tmp6.to(tl.float64) tmp15 = tmp14 * tmp1 tmp16 = tmp15.to(tl.float32) tmp17 = tmp13 / tmp16 tmp18 = tl_math.exp(tmp17) tmp19 = tmp9 - tmp12 tmp20 = tmp19 / tmp16 tmp21 = tl_math.exp(tmp20) tmp22 = tmp11 - tmp12 tmp23 = tmp22 / tmp16 tmp24 = tl_math.exp(tmp23) tmp25 = tmp21 + tmp24 tmp26 = tmp18 / tmp25 tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/f4/cf45yxwk7d3bjoqarfjnwtphtjpu6m6khjd2oobodhjyifufvyoy.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 2 x2 = (xindex // 32) % 4 x3 = (xindex // 128) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1) + (128*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/fd/cfd25pzqi5ycxpig4csiumm6vna2mmh5aqcgnk2vue3tfvwqzryj.py # Topologically Sorted Source Nodes: [add, result], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # result => div_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_3, %squeeze), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {}) triton_poi_fused_add_div_4 = async_compile.triton('triton_poi_fused_add_div_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_2, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_3, (64, 64), (64, 1)) assert_size_stride(primals_4, (64, 64), (64, 1)) assert_size_stride(primals_5, (64, 64), (64, 1)) assert_size_stride(primals_6, (64, 64), (64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [input_all], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 512, grid=grid(512), stream=stream0) del primals_1 buf1 = empty_strided_cuda((8, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (8, 64), (64, 1), 0), reinterpret_tensor(primals_3, (64, 64), (1, 64), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((8, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (8, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (4, 64), (64, 1), 0), reinterpret_tensor(primals_5, (64, 64), (1, 64), 0), out=buf3) del primals_5 buf4 = empty_strided_cuda((4, 4, 16, 2), (128, 32, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, buf4, 256, 2, grid=grid(256, 2), stream=stream0) buf5 = empty_strided_cuda((16, 1, 2), (2, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] 
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 1, 16), (16, 16, 1), 0), reinterpret_tensor(buf4, (16, 16, 2), (32, 2, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [wrapped_sqrt, p_attn], Original ATen: [aten.sqrt, aten._softmax] triton_poi_fused__softmax_sqrt_2.run(buf5, buf6, 32, grid=grid(32), stream=stream0) del buf5 buf7 = reinterpret_tensor(buf1, (4, 4, 2, 16), (128, 32, 16, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf2, buf7, 512, grid=grid(512), stream=stream0) del buf2 buf8 = empty_strided_cuda((16, 1, 16), (16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 2), (2, 2, 1), 0), reinterpret_tensor(buf7, (16, 2, 16), (32, 16, 1), 0), out=buf8) buf9 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (4, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [add, result], Original ATen: [aten.add, aten.div] triton_poi_fused_add_div_4.run(buf10, primals_2, 256, grid=grid(256), stream=stream0) return (buf10, buf6, reinterpret_tensor(buf0, (8, 64), (64, 1), 0), reinterpret_tensor(primals_2, (4, 64), (64, 1), 0), buf6, reinterpret_tensor(buf8, (4, 64), (64, 1), 0), primals_6, reinterpret_tensor(buf7, (16, 16, 2), (32, 1, 16), 0), reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 64), 0), reinterpret_tensor(buf4, (16, 2, 16), (32, 1, 2), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 64), (64, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 64), (64, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F


def activation_factory(activation_type):
    if activation_type == 'RELU':
        return F.relu
    elif activation_type == 'TANH':
        return torch.tanh
    elif activation_type == 'ELU':
        return nn.ELU()
    else:
        raise ValueError('Unknown activation_type: {}'.format(activation_type))


def attention(query, key, value, mask=None, dropout=None):
    """
    Compute a Scaled Dot Product Attention.

    Parameters
    ----------
    query
        size: batch, head, 1 (ego-entity), features
    key
        size: batch, head, entities, features
    value
        size: batch, head, entities, features
    mask
        size: batch, head, 1 (absence feature), 1 (ego-entity)
    dropout

    Returns
    -------
    The attention softmax(QK^T/sqrt(dk))V
    """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / np.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask, -1000000000.0)
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    output = torch.matmul(p_attn, value)
    return output, p_attn


class BaseModule(torch.nn.Module):
    """
    Base torch.nn.Module implementing basic features:
        - initialization factory
        - normalization parameters
    """

    def __init__(self, activation_type='RELU', reset_type='XAVIER'):
        super().__init__()
        self.activation = activation_factory(activation_type)
        self.reset_type = reset_type

    def _init_weights(self, m):
        if hasattr(m, 'weight'):
            if self.reset_type == 'XAVIER':
                torch.nn.init.xavier_uniform_(m.weight.data)
            elif self.reset_type == 'ZEROS':
                torch.nn.init.constant_(m.weight.data, 0.0)
            else:
                raise ValueError('Unknown reset type')
        if hasattr(m, 'bias') and m.bias is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)

    def reset(self):
        self.apply(self._init_weights)


class EgoAttention(BaseModule):

    def __init__(self, feature_size=64, heads=4, dropout_factor=0):
        super().__init__()
        self.feature_size = feature_size
        self.heads = heads
        self.dropout_factor = dropout_factor
        self.features_per_head = int(self.feature_size / self.heads)
        self.value_all = nn.Linear(self.feature_size, self.feature_size, bias=False)
        self.key_all = nn.Linear(self.feature_size, self.feature_size, bias=False)
        self.query_ego = nn.Linear(self.feature_size, self.feature_size, bias=False)
        self.attention_combine = nn.Linear(self.feature_size, self.feature_size, bias=False)

    @classmethod
    def default_config(cls):
        return {}

    def forward(self, ego, others, mask=None):
        batch_size = others.shape[0]
        n_entities = others.shape[1] + 1
        input_all = torch.cat((ego.view(batch_size, 1, self.feature_size), others), dim=1)
        key_all = self.key_all(input_all).view(
            batch_size, n_entities, self.heads, self.features_per_head)
        value_all = self.value_all(input_all).view(
            batch_size, n_entities, self.heads, self.features_per_head)
        query_ego = self.query_ego(ego).view(
            batch_size, 1, self.heads, self.features_per_head)
        key_all = key_all.permute(0, 2, 1, 3)
        value_all = value_all.permute(0, 2, 1, 3)
        query_ego = query_ego.permute(0, 2, 1, 3)
        if mask is not None:
            mask = mask.view((batch_size, 1, 1, n_entities)).repeat(
                (1, self.heads, 1, 1))
        value, attention_matrix = attention(
            query_ego, key_all, value_all, mask, nn.Dropout(self.dropout_factor))
        result = (self.attention_combine(
            value.reshape((batch_size, self.feature_size))) + ego.squeeze(1)) / 2
        return result, attention_matrix


def get_inputs():
    return [torch.rand([4, 1, 64]), torch.rand([4, 1, 64])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
from torch.nn import functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 64 % 2
    x0 = xindex % 64
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 256
    xnumel = 2
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 64
    y1 = yindex // 64
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 64 * x2 + 128 * y1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 2 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused__softmax_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp8 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([1], 4.0, tl.float64)
    tmp2 = tl.full([1], 0.0, tl.float64)
    tmp3 = tmp1 >= tmp2
    tmp4 = 1.0
    tmp5 = -1.0
    tmp6 = tl.where(tmp3, tmp4, tmp5)
    tmp7 = tmp0 * tmp6
    tmp9 = tmp8 * tmp6
    tmp11 = tmp10 * tmp6
    tmp12 = triton_helpers.maximum(tmp9, tmp11)
    tmp13 = tmp7 - tmp12
    tmp14 = tmp6.to(tl.float64)
    tmp15 = tmp14 * tmp1
    tmp16 = tmp15.to(tl.float32)
    tmp17 = tmp13 / tmp16
    tmp18 = tl_math.exp(tmp17)
    tmp19 = tmp9 - tmp12
    tmp20 = tmp19 / tmp16
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp11 - tmp12
    tmp23 = tmp22 / tmp16
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp21 + tmp24
    tmp26 = tmp18 / tmp25
    tl.store(out_ptr0 + x2, tmp26, xmask)


@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16 % 2
    x2 = xindex // 32 % 4
    x3 = xindex // 128
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1 + 128 * x3), xmask)
    tl.store(out_ptr0 + x4, tmp0, xmask)


@triton.jit
def triton_poi_fused_add_div_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64), (64, 64, 1))
    assert_size_stride(primals_2, (4, 1, 64), (64, 64, 1))
    assert_size_stride(primals_3, (64, 64), (64, 1))
    assert_size_stride(primals_4, (64, 64), (64, 1))
    assert_size_stride(primals_5, (64, 64), (64, 1))
    assert_size_stride(primals_6, (64, 64), (64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](primals_2, primals_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((8, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (8, 64), (64, 1), 0),
            reinterpret_tensor(primals_3, (64, 64), (1, 64), 0), out=buf1)
        del primals_3
        buf2 = empty_strided_cuda((8, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (8, 64), (64, 1), 0),
            reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
        del primals_4
        buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (4, 64), (64, 1), 0),
            reinterpret_tensor(primals_5, (64, 64), (1, 64), 0), out=buf3)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 16, 2), (128, 32, 2, 1), torch.float32)
        triton_poi_fused_clone_1[grid(256, 2)](buf1, buf4, 256, 2, XBLOCK=2, YBLOCK=64, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((16, 1, 2), (2, 2, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 1, 16), (16, 16, 1), 0),
            reinterpret_tensor(buf4, (16, 16, 2), (32, 2, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 1, 2), (8, 2, 2, 1), torch.float32)
        triton_poi_fused__softmax_sqrt_2[grid(32)](buf5, buf6, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del buf5
        buf7 = reinterpret_tensor(buf1, (4, 4, 2, 16), (128, 32, 16, 1), 0)
        del buf1
        triton_poi_fused_clone_3[grid(512)](buf2, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        buf8 = empty_strided_cuda((16, 1, 16), (16, 16, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf6, (16, 1, 2), (2, 2, 1), 0),
            reinterpret_tensor(buf7, (16, 2, 16), (32, 16, 1), 0), out=buf8)
        buf9 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf8, (4, 64), (64, 1), 0),
            reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf9)
        buf10 = buf9
        del buf9
        triton_poi_fused_add_div_4[grid(256)](buf10, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return (buf10, buf6, reinterpret_tensor(buf0, (8, 64), (64, 1), 0),
        reinterpret_tensor(primals_2, (4, 64), (64, 1), 0), buf6,
        reinterpret_tensor(buf8, (4, 64), (64, 1), 0), primals_6,
        reinterpret_tensor(buf7, (16, 16, 2), (32, 1, 16), 0),
        reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 64), 0),
        reinterpret_tensor(buf4, (16, 2, 16), (32, 1, 2), 0))


def activation_factory(activation_type):
    if activation_type == 'RELU':
        return F.relu
    elif activation_type == 'TANH':
        return torch.tanh
    elif activation_type == 'ELU':
        return nn.ELU()
    else:
        raise ValueError('Unknown activation_type: {}'.format(activation_type))


def attention(query, key, value, mask=None, dropout=None):
    """
    Compute a Scaled Dot Product Attention.

    Parameters
    ----------
    query
        size: batch, head, 1 (ego-entity), features
    key
        size: batch, head, entities, features
    value
        size: batch, head, entities, features
    mask
        size: batch, head, 1 (absence feature), 1 (ego-entity)
    dropout

    Returns
    -------
    The attention softmax(QK^T/sqrt(dk))V
    """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / np.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask, -1000000000.0)
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    output = torch.matmul(p_attn, value)
    return output, p_attn


class BaseModule(torch.nn.Module):
    """
    Base torch.nn.Module implementing basic features:
    - initialization factory
    - normalization parameters
    """

    def __init__(self, activation_type='RELU', reset_type='XAVIER'):
        super().__init__()
        self.activation = activation_factory(activation_type)
        self.reset_type = reset_type

    def _init_weights(self, m):
        if hasattr(m, 'weight'):
            if self.reset_type == 'XAVIER':
                torch.nn.init.xavier_uniform_(m.weight.data)
            elif self.reset_type == 'ZEROS':
                torch.nn.init.constant_(m.weight.data, 0.0)
            else:
                raise ValueError('Unknown reset type')
        if hasattr(m, 'bias') and m.bias is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)

    def reset(self):
        self.apply(self._init_weights)


class EgoAttentionNew(BaseModule):

    def __init__(self, feature_size=64, heads=4, dropout_factor=0):
        super().__init__()
        self.feature_size = feature_size
        self.heads = heads
        self.dropout_factor = dropout_factor
        self.features_per_head = int(self.feature_size / self.heads)
        self.value_all = nn.Linear(self.feature_size, self.feature_size, bias=False)
        self.key_all = nn.Linear(self.feature_size, self.feature_size, bias=False)
        self.query_ego = nn.Linear(self.feature_size, self.feature_size, bias=False)
        self.attention_combine = nn.Linear(self.feature_size, self.feature_size, bias=False)

    @classmethod
    def default_config(cls):
        return {}

    def forward(self, input_0, input_1):
        primals_3 = self.value_all.weight
        primals_4 = self.key_all.weight
        primals_5 = self.query_ego.weight
        primals_6 = self.attention_combine.weight
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
        return output[0], output[1]
AmiEis/highway-env
EgoAttention
false
1962
[ "MIT" ]
0
7477d8234aa34447292ed92e7da547eac20f9d8e
https://github.com/AmiEis/highway-env/tree/7477d8234aa34447292ed92e7da547eac20f9d8e
import torch import numpy as np import torch.nn as nn from torch.nn import functional as F def activation_factory(activation_type): if activation_type == 'RELU': return F.relu elif activation_type == 'TANH': return torch.tanh elif activation_type == 'ELU': return nn.ELU() else: raise ValueError('Unknown activation_type: {}'.format(activation_type)) def attention(query, key, value, mask=None, dropout=None): """ Compute a Scaled Dot Product Attention. Parameters ---------- query size: batch, head, 1 (ego-entity), features key size: batch, head, entities, features value size: batch, head, entities, features mask size: batch, head, 1 (absence feature), 1 (ego-entity) dropout Returns ------- The attention softmax(QK^T/sqrt(dk))V """ d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / np.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask, -1000000000.0) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) output = torch.matmul(p_attn, value) return output, p_attn class BaseModule(torch.nn.Module): """ Base torch.nn.Module implementing basic features: - initialization factory - normalization parameters """ def __init__(self, activation_type='RELU', reset_type='XAVIER'): super().__init__() self.activation = activation_factory(activation_type) self.reset_type = reset_type def _init_weights(self, m): if hasattr(m, 'weight'): if self.reset_type == 'XAVIER': torch.nn.init.xavier_uniform_(m.weight.data) elif self.reset_type == 'ZEROS': torch.nn.init.constant_(m.weight.data, 0.0) else: raise ValueError('Unknown reset type') if hasattr(m, 'bias') and m.bias is not None: torch.nn.init.constant_(m.bias.data, 0.0) def reset(self): self.apply(self._init_weights) class Model(BaseModule): def __init__(self, feature_size=64, heads=4, dropout_factor=0): super().__init__() self.feature_size = feature_size self.heads = heads self.dropout_factor = dropout_factor self.features_per_head = int(self.feature_size / self.heads) self.value_all = nn.Linear(self.feature_size, self.feature_size, bias=False) self.key_all = nn.Linear(self.feature_size, self.feature_size, bias =False) self.query_ego = nn.Linear(self.feature_size, self.feature_size, bias=False) self.attention_combine = nn.Linear(self.feature_size, self. feature_size, bias=False) @classmethod def default_config(cls): return {} def forward(self, ego, others, mask=None): batch_size = others.shape[0] n_entities = others.shape[1] + 1 input_all = torch.cat((ego.view(batch_size, 1, self.feature_size), others), dim=1) key_all = self.key_all(input_all).view(batch_size, n_entities, self .heads, self.features_per_head) value_all = self.value_all(input_all).view(batch_size, n_entities, self.heads, self.features_per_head) query_ego = self.query_ego(ego).view(batch_size, 1, self.heads, self.features_per_head) key_all = key_all.permute(0, 2, 1, 3) value_all = value_all.permute(0, 2, 1, 3) query_ego = query_ego.permute(0, 2, 1, 3) if mask is not None: mask = mask.view((batch_size, 1, 1, n_entities)).repeat((1, self.heads, 1, 1)) value, attention_matrix = attention(query_ego, key_all, value_all, mask, nn.Dropout(self.dropout_factor)) result = (self.attention_combine(value.reshape((batch_size, self. feature_size))) + ego.squeeze(1)) / 2 ret # ... truncated (>4000 chars) for memory efficiency
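Since this entry's pytorch_code field is truncated above, a quick shape walk-through of the eager module is useful as a reference. This is a minimal sketch (CPU is fine for the eager path); it assumes only the EgoAttention class and the get_inputs() sizes defined in this entry:

import torch

# Sizes follow get_inputs(): batch=4, one "other" entity, feature_size=64, heads=4.
model = EgoAttention(feature_size=64, heads=4)
ego, others = torch.rand(4, 1, 64), torch.rand(4, 1, 64)

result, attn = model(ego, others)
# Keys/values cover ego + others = 2 entities while only the ego queries, so the
# attention matrix is (batch, heads, 1, entities) = (4, 4, 1, 2) -- the same size
# as buf6 produced by triton_poi_fused__softmax_sqrt_2 in the compiled code above.
assert attn.shape == (4, 4, 1, 2)
# The combined value is averaged with the ego embedding, giving (batch, feature_size).
assert result.shape == (4, 64)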
PixelNorm
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/qa/cqaynfy4pksjylutfotzmvjyfwpallfpefdyp3nouvnarntc363b.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   mean => mean
#   mul => mul
#   pow_1 => pow_1
#   rsqrt => rsqrt
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %rsqrt), kwargs = {})
triton_poi_fused_add_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mean_mul_pow_rsqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class PixelNorm(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-08)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class PixelNormNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
ArashVahabpour/encoder4editing
PixelNorm
false
1963
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
import torch
from torch import nn


class Model(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-08)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
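PixelNorm divides each feature vector by its root mean square over the channel dimension, which is exactly what the fused kernel above computes with its four hard-coded channel loads. A minimal equivalence check between the eager and Triton-backed modules (a sketch only; it assumes a CUDA device with Triton available and the classes defined in this entry):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')  # contiguous, matching the asserted strides
out_eager = PixelNorm()(x)
out_triton = PixelNormNew()(x)
# Both compute x * rsqrt(mean(x**2, dim=1, keepdim=True) + 1e-8); the kernel just
# unrolls the 4-channel mean, so the results should agree up to float rounding.
assert torch.allclose(out_eager, out_triton, atol=1e-6)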
RMSELoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/fg/cfgtp4ynxj6iswly436iktrb6rezfyctxdoohnh4z556bxpa2jv4.py
# Topologically Sorted Source Nodes: [mse_loss, loss], Original ATen: [aten.mse_loss, aten.sqrt]
# Source node to ATen node mapping:
#   loss => sqrt
#   mse_loss => mean, pow_1, sub
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
triton_per_fused_mse_loss_sqrt_0 = async_compile.triton('triton_per_fused_mse_loss_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.load(in_ptr1 + (r0), None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [mse_loss, loss], Original ATen: [aten.mse_loss, aten.sqrt]
        stream0 = get_raw_stream(0)
        triton_per_fused_mse_loss_sqrt_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class RMSELoss(nn.Module):

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, x, y):
        loss = torch.sqrt(self.mse(x, y))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
    tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
    tmp7 = 256.0
    tmp8 = tmp6 / tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class RMSELossNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Aquarium1222/Electricity-Forecasting
RMSELoss
false
1964
[ "MIT" ]
0
9f945d3fd8006e5d77da08ff7861577965109ec8
https://github.com/Aquarium1222/Electricity-Forecasting/tree/9f945d3fd8006e5d77da08ff7861577965109ec8
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, x, y):
        loss = torch.sqrt(self.mse(x, y))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
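RMSE is just the square root of the mean squared error, which is why a single persistent-reduction kernel suffices here: one block sums all 4*4*4*4 = 256 squared differences, divides by 256.0, and takes the square root. A tiny CPU sketch of the same arithmetic (using only the eager class from this entry):

import torch

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = RMSELoss()(x, y)
# RMSE = sqrt(mean((x - y)^2)), matching the tmp6 / 256.0 -> sqrt chain above.
manual = torch.sqrt(((x - y) ** 2).mean())
assert torch.allclose(loss, manual)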
CORblock_Z
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   x => convolution
#   x_1 => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/zn/cznnoupxqm425i7nrya5lrbw7yqnxgeucaf7jrh623u5usdoidtu.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   x_2 => getitem, getitem_1
# Graph fragment:
#   %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
#   %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 2) % 2
    x0 = xindex % 2
    x3 = (xindex // 2)
    x4 = xindex
    tmp0 = (-1) + (2*x1)
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = (-1) + (2*x0)
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x3)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp12 = 2*x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x3)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + (2*x0)
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x3)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = 2*x1
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x3)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + ((2*x0) + (8*x3)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x3)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = 1 + (2*x1)
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x3)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x3)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x3)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf"))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + (x4), tmp51, xmask)
    tl.store(out_ptr1 + (x4), tmp76, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
    return (buf2, primals_1, primals_3, buf1, buf3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.model_zoo
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed


class Identity(nn.Module):
    """
    Helper module that stores the current tensor. Useful for accessing by name
    """

    def forward(self, x):
        return x


class CORblock_Z(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
            stride=stride, padding=kernel_size // 2)
        self.nonlin = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.output = Identity()

    def forward(self, inp):
        x = self.conv(inp)
        x = self.nonlin(x)
        x = self.pool(x)
        x = self.output(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.model_zoo
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 2 % 2
    x0 = xindex % 2
    x3 = xindex // 2
    x4 = xindex
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x3), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp12 = 2 * x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x3), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + 2 * x0
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x3), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = 2 * x1
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x3), tmp30 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp33 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp36 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = 1 + 2 * x1
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x3), tmp43 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp46 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp49 & xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x4, tmp51, xmask)
    tl.store(out_ptr1 + x4, tmp76, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
    return buf2, primals_1, primals_3, buf1, buf3


class Identity(nn.Module):
    """
    Helper module that stores the current tensor. Useful for accessing by name
    """

    def forward(self, x):
        return x


class CORblock_ZNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
            stride=stride, padding=kernel_size // 2)
        self.nonlin = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.output = Identity()

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AnnaTruzzi/cornet_analysis
CORblock_Z
false
1965
[ "MIT" ]
0
0a2fd0c5a6b09a80d3c8a47441b08fd6129f7a2d
https://github.com/AnnaTruzzi/cornet_analysis/tree/0a2fd0c5a6b09a80d3c8a47441b08fd6129f7a2d
import torch
import torch.utils.model_zoo
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed


class Identity(nn.Module):
    """
    Helper module that stores the current tensor. Useful for accessing by name
    """

    def forward(self, x):
        return x


class Model(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
            stride=stride, padding=kernel_size // 2)
        self.nonlin = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.output = Identity()

    def forward(self, inp):
        x = self.conv(inp)
        x = self.nonlin(x)
        x = self.pool(x)
        x = self.output(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
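The only shape change in a CORblock_Z is the pooling stage: the 3x3 convolution with padding 1 and stride 1 preserves H and W, and MaxPool2d(kernel_size=3, stride=2, padding=1) then maps H to floor((H + 2 - 3) / 2) + 1, i.e. 4 -> 2 here. A short eager check (a sketch; it uses only the class and sizes defined in this entry):

import torch

block = CORblock_Z(in_channels=4, out_channels=4)
out = block(torch.rand(4, 4, 4, 4))
# conv keeps 4x4; pool gives floor((4 + 2*1 - 3) / 2) + 1 = 2, so the output is
# (4, 4, 2, 2) -- the same size asserted for buf2 in the compiled call() above.
assert out.shape == (4, 4, 2, 2)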
InstanceNorm
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/2t/c2tlmlnddgofeoodf622jrs5dgndzay3j3z6iezkat4cpubm2kuk.py
# Topologically Sorted Source Nodes: [mean, x, tmp, mean_1, add, tmp_1, mul_1], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add, aten.rsqrt]
# Source node to ATen node mapping:
#   add => add
#   mean => mean
#   mean_1 => mean_1
#   mul_1 => mul_1
#   tmp => mul
#   tmp_1 => rsqrt
#   x => sub
# Graph fragment:
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2, 3], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [2, 3], True), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {})
#   %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
triton_per_fused_add_mean_mul_rsqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_rsqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_rsqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_rsqrt_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tmp7 = tmp0 - tmp6
    tmp8 = tmp7 * tmp7
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tmp13 = tmp12 / tmp5
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp7 * tmp16
    tl.store(out_ptr2 + (r1 + (16*x0)), tmp17, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mean, x, tmp, mean_1, add, tmp_1, mul_1], Original ATen: [aten.mean, aten.sub, aten.mul, aten.add, aten.rsqrt]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_mean_mul_rsqrt_sub_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


class InstanceNorm(nn.Module):

    def __init__(self, epsilon=1e-08):
        super(InstanceNorm, self).__init__()
        self.epsilon = epsilon

    def forward(self, x):
        x = x - torch.mean(x, (2, 3), True)
        tmp = torch.mul(x, x)
        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
        return x * tmp


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_mean_mul_rsqrt_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tmp7 = tmp0 - tmp6
    tmp8 = tmp7 * tmp7
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.where(xmask, tmp9, 0)
    tmp12 = tl.sum(tmp11, 1)[:, None]
    tmp13 = tmp12 / tmp5
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp7 * tmp16
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp17, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_mean_mul_rsqrt_sub_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del arg0_1
    return buf2,


class InstanceNormNew(nn.Module):

    def __init__(self, epsilon=1e-08):
        super(InstanceNormNew, self).__init__()
        self.epsilon = epsilon

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Archjbald/PoseStylizer
InstanceNorm
false
1966
[ "BSD-3-Clause" ]
0
95aae02d1f4ac83536d91b8db5f78d12e7830f97
https://github.com/Archjbald/PoseStylizer/tree/95aae02d1f4ac83536d91b8db5f78d12e7830f97
import torch
import torch.utils.data
from torch import nn


class Model(nn.Module):

    def __init__(self, epsilon=1e-08):
        super().__init__()
        self.epsilon = epsilon

    def forward(self, x):
        x = x - torch.mean(x, (2, 3), True)
        tmp = torch.mul(x, x)
        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
        return x * tmp


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
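This InstanceNorm is the affine-free variant with a biased variance (the mean of squared deviations over H and W per sample and channel), so it should agree numerically with torch.nn.functional.instance_norm at the same epsilon. A minimal CPU sketch (assuming only the eager class from this entry):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
out = InstanceNorm()(x)
# F.instance_norm also centers per (N, C) and divides by sqrt(biased_var + eps).
ref = F.instance_norm(x, eps=1e-08)
assert torch.allclose(out, ref, atol=1e-6)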
SkipConnection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/xm/cxmk2kod6zgjturywionsuihaxqils4fvzrd7bziqpvptc3rgw43.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] # Source node to ATen node mapping: # out => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), 
tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 4, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (48*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0) del primals_4 return (buf2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
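Before the cleaned-up fields below, a brief eager-mode illustration may help. This is a hedged sketch, not part of the record: it mirrors in plain PyTorch what the fused cat kernel above computes, where for a flat output index i the term x1 = i // 16 % 4 selects the output channel, channel 0 reads the 1-channel input, and channels 1..3 read the 3-channel input at channel x1 - 1.

# Hedged sketch: plain-PyTorch equivalent of triton_poi_fused_cat_0's
# channel selection; torch.cat on dim=1 is the reference result.
import torch

a = torch.rand(4, 1, 4, 4)   # in_ptr0: becomes channel 0 of the output
b = torch.rand(4, 3, 4, 4)   # in_ptr1: becomes channels 1..3 of the output
ref = torch.cat((a, b), dim=1)
manual = torch.empty(4, 4, 4, 4)
manual[:, 0:1] = a           # the x1 < 1 branch (tmp4/tmp5)
manual[:, 1:4] = b           # the x1 >= 1 branch (tmp6/tmp9), offset by -1
assert torch.equal(ref, manual)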
import torch
import torch.utils.data
import torch.nn as nn


def _init_weights(layer):
    """
    Init weights of the layer
    :param layer:
    :return:
    """
    nn.init.xavier_uniform_(layer.weight)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)


class SkipConnection(nn.Module):
    """
    Connects the two given inputs with concatenation
    :param in1: earlier input tensor of shape N x d1 x m x m
    :param in2: later input tensor of shape N x d2 x m x m
    :param in_features: d1+d2
    :param out_features: output num of features
    :return: Tensor of shape N x output_depth x m x m
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.conv = nn.Conv2d(in_features, out_features, kernel_size=1,
            padding=0, bias=True)
        _init_weights(self.conv)

    def forward(self, in1, in2):
        out = torch.cat((in1, in2), dim=1)
        out = self.conv(out)
        return out


def get_inputs():
    return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x0 = xindex % 16
    x2 = xindex // 64
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 4, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask,
        other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x3, tmp10, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
    assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_4, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_4
    return buf2, primals_3, buf0


def _init_weights(layer):
    """
    Init weights of the layer
    :param layer:
    :return:
    """
    nn.init.xavier_uniform_(layer.weight)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)


class SkipConnectionNew(nn.Module):
    """
    Connects the two given inputs with concatenation
    :param in1: earlier input tensor of shape N x d1 x m x m
    :param in2: later input tensor of shape N x d2 x m x m
    :param in_features: d1+d2
    :param out_features: output num of features
    :return: Tensor of shape N x output_depth x m x m
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.conv = nn.Conv2d(in_features, out_features, kernel_size=1,
            padding=0, bias=True)
        _init_weights(self.conv)

    def forward(self, input_0, input_1):
        primals_3 = self.conv.weight
        primals_4 = self.conv.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
AntoBcc/benchmarking-gnns
SkipConnection
false
1967
[ "MIT" ]
0
c5750054b2f4ba0822f203fa18d382f6a3b16542
https://github.com/AntoBcc/benchmarking-gnns/tree/c5750054b2f4ba0822f203fa18d382f6a3b16542
import torch
import torch.utils.data
import torch.nn as nn


def _init_weights(layer):
    """
    Init weights of the layer
    :param layer:
    :return:
    """
    nn.init.xavier_uniform_(layer.weight)
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)


class Model(nn.Module):
    """
    Connects the two given inputs with concatenation
    :param in1: earlier input tensor of shape N x d1 x m x m
    :param in2: later input tensor of shape N x d2 x m x m
    :param in_features: d1+d2
    :param out_features: output num of features
    :return: Tensor of shape N x output_depth x m x m
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.conv = nn.Conv2d(in_features, out_features, kernel_size=1,
            padding=0, bias=True)
        _init_weights(self.conv)

    def forward(self, in1, in2):
        out = torch.cat((in1, in2), dim=1)
        out = self.conv(out)
        return out


def get_inputs():
    return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]


def get_init_inputs():
    return [4, 4]
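To close out this record, a minimal smoke test; this is a hedged sketch that assumes the SkipConnection class from the python_code field above is in scope, and it only checks eager-mode shapes, not the compiled kernels.

# Shapes follow get_inputs()/get_init_inputs(): d1=1 and d2=3 channels are
# concatenated on dim=1 and mixed down to out_features=4 by the 1x1 conv.
import torch

model = SkipConnection(in_features=4, out_features=4)
in1 = torch.rand(4, 1, 4, 4)   # N x d1 x m x m
in2 = torch.rand(4, 3, 4, 4)   # N x d2 x m x m, d1 + d2 == in_features
out = model(in1, in2)
assert out.shape == (4, 4, 4, 4)   # a 1x1 conv preserves the spatial size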
DQN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (32, 4), (4, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 32), (32, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 32), (32, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (4, 32), (32, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 2048, grid=grid(2048), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 2048, grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 32), (1, 32), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf4 # reuse buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf7, 2048, grid=grid(2048), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 32), (32, 1), 0), reinterpret_tensor(primals_8, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf5, (64, 32), (32, 1), 0), primals_8, buf7, primals_6, buf8, 
primals_4, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
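One detail of the dump above worth spelling out: each invocation of triton_poi_fused_relu_threshold_backward_0 fuses the bias add with the ReLU and also records the (activation <= 0) mask that autograd's threshold_backward later needs; buf7, buf8 and buf9 hold those masks. A hedged eager-mode equivalent of one such invocation (all names here are illustrative):

# Mirrors one fused kernel call: bias-add + ReLU + backward mask.
import torch

pre = torch.randn(64, 32)          # mm output (in_out_ptr0)
bias = torch.randn(32)             # layer bias (in_ptr0)
act = torch.relu(pre + bias)       # written back in place by the kernel
mask = act <= 0                    # boolean mask saved for the backward pass
assert act.shape == mask.shape and mask.dtype == torch.bool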
import torch
import torch.nn as nn
import torch.nn.functional as F


class DQN(nn.Module):

    def __init__(self, n_in, n_out):
        super(DQN, self).__init__()
        self.ln1 = nn.Linear(n_in, 32)
        self.ln2 = nn.Linear(32, 32)
        self.ln4 = nn.Linear(32, 32)
        self.out = nn.Linear(32, n_out)

    def forward(self, x):
        x = F.relu(self.ln1(x))
        x = F.relu(self.ln2(x))
        x = F.relu(self.ln4(x))
        return self.out(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_in': 4, 'n_out': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 32
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (32, 4), (4, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (32, 32), (32, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (32, 32), (32, 1))
    assert_size_stride(primals_7, (32,), (1,))
    assert_size_stride(primals_8, (4, 32), (32, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf0
        buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
            primals_2, buf9, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
            reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
            primals_5, buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
            reinterpret_tensor(primals_6, (32, 32), (1, 32), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 32), (512, 128, 32, 1), 0)
        del buf4
        buf7 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf5,
            primals_7, buf7, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 32),
            (32, 1), 0), reinterpret_tensor(primals_8, (32, 4), (1, 32), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
    return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
        reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
        reinterpret_tensor(buf5, (64, 32), (32, 1), 0),
        primals_8, buf7, primals_6, buf8, primals_4, buf9)


class DQNNew(nn.Module):

    def __init__(self, n_in, n_out):
        super(DQNNew, self).__init__()
        self.ln1 = nn.Linear(n_in, 32)
        self.ln2 = nn.Linear(32, 32)
        self.ln4 = nn.Linear(32, 32)
        self.out = nn.Linear(32, n_out)

    def forward(self, input_0):
        primals_1 = self.ln1.weight
        primals_2 = self.ln1.bias
        primals_4 = self.ln2.weight
        primals_5 = self.ln2.bias
        primals_6 = self.ln4.weight
        primals_7 = self.ln4.bias
        primals_8 = self.out.weight
        primals_9 = self.out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
AndrejHafner/tetris-reinforcement-learning
DQN
false
1968
[ "MIT" ]
0
52db5d8ce7f9162b15575456a0effc69dd7fb2bf
https://github.com/AndrejHafner/tetris-reinforcement-learning/tree/52db5d8ce7f9162b15575456a0effc69dd7fb2bf
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, n_in, n_out):
        super().__init__()
        self.ln1 = nn.Linear(n_in, 32)
        self.ln2 = nn.Linear(32, 32)
        self.ln4 = nn.Linear(32, 32)
        self.out = nn.Linear(32, n_out)

    def forward(self, x):
        x = F.relu(self.ln1(x))
        x = F.relu(self.ln2(x))
        x = F.relu(self.ln4(x))
        return self.out(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
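A hedged smoke test for this record (assuming the DQN class above is in scope): nn.Linear acts on the trailing dimension, so the 4x4x4x4 input behaves like 64 rows of 4 features, which is exactly why call() reinterprets primals_3 as a (64, 4) tensor.

import torch

net = DQN(n_in=4, n_out=4)
x = torch.rand(4, 4, 4, 4)
q = net(x)
assert q.shape == (4, 4, 4, 4)
# Flattening the leading dims should give the same numbers, up to float noise.
assert torch.allclose(q.view(64, 4), net(x.view(64, 4)))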
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/h6/ch6lbegsbikmeuedhuqntueffmboz2ejtnlwrp3sosxr5gscbhtj.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # norm => add # out => mul # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = libdevice.sqrt(tmp12) tmp14 = 1e-10 tmp15 = tmp13 + tmp14 tmp16 = tmp1 / tmp15 tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, x, out], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.init as init


class L2Norm(nn.Module):

    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_channels': 4, 'scale': 1.0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
import torch.nn.init as init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 4
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tmp14 = 1e-10
    tmp15 = tmp13 + tmp14
    tmp16 = tmp1 / tmp15
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
            primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf0, primals_1


class L2NormNew(nn.Module):

    def __init__(self, n_channels, scale):
        super(L2NormNew, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
Anonymous4604/Self-ADE_SSD
L2Norm
false
1969
[ "MIT" ]
0
eb4107e17721e17f2dedbdae654a43fc5d291f8c
https://github.com/Anonymous4604/Self-ADE_SSD/tree/eb4107e17721e17f2dedbdae654a43fc5d291f8c
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.init as init


class Model(nn.Module):

    def __init__(self, n_channels, scale):
        super().__init__()
        self.n_channels = n_channels
        self.gamma = scale or None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 1.0]
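A hedged numeric check for this record (assuming the L2Norm class above is in scope): with scale=1.0 the learned weight starts as all ones, so after the channel-wise normalization every spatial position of the output should have unit L2 norm across dim=1.

import torch

layer = L2Norm(n_channels=4, scale=1.0)
x = torch.rand(4, 4, 4, 4)
out = layer(x)
norms = out.pow(2).sum(dim=1).sqrt()   # one norm per spatial position, (4, 4, 4)
assert torch.allclose(norms, torch.ones_like(norms), atol=1e-5)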
EqualConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/zs/czsbkexmu6ywpra7jqion5n6drhfl2liw6og7nt2lnvf5ix7ikrs.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.125), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py # Topologically 
Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (buf2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
from torch import nn
import torch.nn.functional as F


class EqualConv2d(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, stride=1,
            padding=0, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
            kernel_size, kernel_size))
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
        self.stride = stride
        self.padding = padding
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))
        else:
            self.bias = None

    def forward(self, input):
        out = F.conv2d(input, self.weight * self.scale, bias=self.bias,
            stride=self.stride, padding=self.padding)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
            )


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.125
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
    return buf2, primals_3, buf0


class EqualConv2dNew(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, stride=1,
            padding=0, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
            kernel_size, kernel_size))
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
        self.stride = stride
        self.padding = padding
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))
        else:
            self.bias = None

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
            )

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ArashVahabpour/encoder4editing
EqualConv2d
false
1970
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
import math
import torch
from torch import nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, in_channel, out_channel, kernel_size, stride=1,
            padding=0, bias=True):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_channel, in_channel,
            kernel_size, kernel_size))
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
        self.stride = stride
        self.padding = padding
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))
        else:
            self.bias = None

    def forward(self, input):
        out = F.conv2d(input, self.weight * self.scale, bias=self.bias,
            stride=self.stride, padding=self.padding)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}, {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
            )


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 4]
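A hedged sanity check for this record (assuming EqualConv2d above is in scope): with in_channel=4 and kernel_size=4 the equalized-learning-rate scale is 1/sqrt(4 * 4**2) = 1/8 = 0.125, which is exactly the constant baked into triton_poi_fused_mul_0 above, and a 4x4 kernel over a 4x4 input with no padding leaves a 1x1 spatial output.

import math
import torch

conv = EqualConv2d(in_channel=4, out_channel=4, kernel_size=4)
assert conv.scale == 1 / math.sqrt(4 * 4 ** 2) == 0.125  # matches the kernel constant
out = conv(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 1, 1)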
NoiseInjection
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/zd/czd6aawbiozsgqkmo34wem4qctui4myzcthrzembv5p2bnbo25gz.py # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %normal_functional), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) 
tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional] buf1 = torch.ops.aten.normal_functional.default(buf0) del buf0 buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, buf2, buf3, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class NoiseInjection(nn.Module):

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()
        return image + self.weight * noise


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tmp2 * tmp3
    tmp5 = tmp0 + tmp4
    tl.store(out_ptr0 + x3, tmp5, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = torch.ops.aten.normal_functional.default(buf0)
        del buf0
        buf2 = buf1
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf2,
            buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf3, buf2


class NoiseInjectionNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
ArashVahabpour/encoder4editing
NoiseInjection
false
1971
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
import torch
from torch import nn


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()
        return image + self.weight * noise


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
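A hedged behavioural check for this record (assuming NoiseInjection above is in scope): the weight initializes to zero, so a freshly constructed module is the identity regardless of the noise it samples, and passing an explicit noise tensor makes the op deterministic.

import torch

layer = NoiseInjection()
image = torch.rand(4, 4, 4, 4)
assert torch.equal(layer(image), image)   # weight == 0, so the module is identity

noise = torch.randn(4, 1, 4, 4)           # broadcast over the channel dim
with torch.no_grad():
    layer.weight.fill_(0.5)
assert torch.allclose(layer(image, noise), image + 0.5 * noise)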
QuickGELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/vb/cvbzgl2kevabwrh6e5ay2dasnq6gi3a7zeveknxks7jkbuq64ime.py # Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # sigmoid => sigmoid # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.702), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class QuickGELU(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class QuickGELUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Artanic30/RentalPrediction
QuickGELU
false
1972
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
import torch from torch import nn class Model(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
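A record like the one above can be sanity-checked by running the eager module against the Triton-backed wrapper on the same input. The sketch below is editorial illustration, not part of the dataset: it assumes a CUDA device, that QuickGELU and QuickGELUNew from the python_code and triton_code fields are in scope, and an arbitrary tolerance.

import torch

# Hypothetical equivalence check (not part of the record). Both modules
# compute x * sigmoid(1.702 * x); the second routes through the generated
# Triton kernel via call([...]), which requires a contiguous CUDA tensor
# with exactly the shape and stride asserted by assert_size_stride.
x = torch.rand([4, 4, 4, 4], device='cuda')
eager = QuickGELU()
compiled = QuickGELUNew()
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)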
HuberLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/h3/ch3qidii4qfm5ghapin7tnms27ccjbq3fj2gekdrfdy7mbjfc6tb.py # Topologically Sorted Source Nodes: [truediv, truediv_1, loss, mul, mul_1], Original ATen: [aten.div, aten.smooth_l1_loss, aten.mul] # Source node to ATen node mapping: # loss => abs_1, div_2, lt, mean, mul, pow_1, sub, sub_1, where # mul => mul_1 # mul_1 => mul_2 # truediv => div # truediv_1 => div_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_2, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 1), kwargs = {}) triton_per_fused_div_mul_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_div_mul_smooth_l1_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp6 < tmp1 tmp8 = tmp6 * tmp6 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp1 tmp12 = tmp6 - tmp9 tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [truediv, truediv_1, loss, mul, mul_1], Original ATen: [aten.div, aten.smooth_l1_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_smooth_l1_loss_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class HuberLoss(nn.Module): def __init__(self, delta=1): super().__init__() self.huber_loss_delta1 = nn.SmoothL1Loss() self.delta = delta def forward(self, x, x_hat): loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta) return loss * self.delta * self.delta def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp6 < tmp1 tmp8 = tmp6 * tmp6 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp1 tmp12 = tmp6 - tmp9 tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mul_smooth_l1_loss_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HuberLossNew(nn.Module): def __init__(self, delta=1): super().__init__() self.huber_loss_delta1 = nn.SmoothL1Loss() self.delta = delta def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ArashVahabpour/sog-gail
HuberLoss
false
1973
[ "MIT" ]
0
90ebdc5a051a015f3b6c801d4b16307d2fbac0ae
https://github.com/ArashVahabpour/sog-gail/tree/90ebdc5a051a015f3b6c801d4b16307d2fbac0ae
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, delta=1): super().__init__() self.huber_loss_delta1 = nn.SmoothL1Loss() self.delta = delta def forward(self, x, x_hat): loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta) return loss * self.delta * self.delta def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
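The same pattern applies to the loss record above; with the default delta=1, both classes reduce to nn.SmoothL1Loss, and the fused kernel folds the /delta and *delta*delta steps into a single reduction. A minimal sketch under the same assumptions (CUDA device, HuberLoss and HuberLossNew in scope, arbitrary tolerance):

import torch

# Hypothetical check: both paths should return the same scalar loss.
x = torch.rand([4, 4, 4, 4], device='cuda')
x_hat = torch.rand([4, 4, 4, 4], device='cuda')
eager = HuberLoss()
compiled = HuberLossNew()
with torch.no_grad():
    assert torch.allclose(eager(x, x_hat), compiled(x, x_hat), atol=1e-6)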
PixelNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/oj/cojs3rolrbuzbumcv56hzrpmpk5vjaaabxquccaenqx3ti3blyt2.py # Topologically Sorted Source Nodes: [tmp, mean, add, tmp1, mul_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # mean => mean # mul_1 => mul_1 # tmp => mul # tmp1 => rsqrt # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %rsqrt), kwargs = {}) triton_poi_fused_add_mean_mul_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_rsqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp, mean, add, tmp1, mul_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.rsqrt] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn class PixelNorm(nn.Module): def __init__(self, epsilon=1e-08): super(PixelNorm, self).__init__() self.epsilon = epsilon def forward(self, x): tmp = torch.mul(x, x) tmp1 = torch.rsqrt(torch.mean(tmp, dim=1, keepdim=True) + self.epsilon) return x * tmp1 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PixelNormNew(nn.Module): def __init__(self, epsilon=1e-08): super(PixelNormNew, self).__init__() self.epsilon = epsilon def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Archjbald/PoseStylizer
PixelNorm
false
1974

[ "BSD-3-Clause" ]
0
95aae02d1f4ac83536d91b8db5f78d12e7830f97
https://github.com/Archjbald/PoseStylizer/tree/95aae02d1f4ac83536d91b8db5f78d12e7830f97
import torch import torch.utils.data from torch import nn class Model(nn.Module): def __init__(self, epsilon=1e-08): super().__init__() self.epsilon = epsilon def forward(self, x): tmp = torch.mul(x, x) tmp1 = torch.rsqrt(torch.mean(tmp, dim=1, keepdim=True) + self.epsilon) return x * tmp1 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
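PixelNorm computes out = x * rsqrt(mean(x**2, dim=1, keepdim=True) + eps), i.e. it divides each pixel's feature vector by its RMS over the channel dimension; the fused kernel unrolls the four-channel mean explicitly (the tmp1..tmp11 loads and adds). A hedged check of both classes against the closed-form expression, same assumptions as before:

import torch

# Hypothetical check (illustration only): compare eager module, compiled
# wrapper, and the direct formula on one random input.
x = torch.rand([4, 4, 4, 4], device='cuda')
reference = x * torch.rsqrt(x.pow(2).mean(dim=1, keepdim=True) + 1e-08)
with torch.no_grad():
    assert torch.allclose(PixelNorm()(x), reference, atol=1e-6)
    assert torch.allclose(PixelNormNew()(x), reference, atol=1e-6)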
SEModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] # Source node to ATen node mapping: # x => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + 
(r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/dv/cdv4qmhbronnvlfedec6wf2u757xxim2rrcxixauamfjkccvlhcu.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/xn/cxnpie6wemy7uacyfqmxuw22lmj4fbp5lpj4ual7647ci2bfkl3t.py # Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # x_4 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 1, 1, 1), (1, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_2.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0) return (buf5, primals_1, primals_2, primals_3, buf1, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Conv2d from torch.nn import ReLU from torch.nn import Sigmoid from torch.nn import AdaptiveAvgPool2d class SEModule(Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.avg_pool = AdaptiveAvgPool2d(1) self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) self.relu = ReLU(inplace=True) self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) self.sigmoid = Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'reduction': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn import Conv2d from torch.nn import ReLU from torch.nn import Sigmoid from torch.nn import AdaptiveAvgPool2d assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4, 1, 1, 1), (1, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf5, primals_1, primals_2, primals_3, buf1, buf3, buf4 class SEModuleNew(Module): def __init__(self, channels, reduction): super(SEModuleNew, self).__init__() self.avg_pool = AdaptiveAvgPool2d(1) self.fc1 = Conv2d(channels, channels 
// reduction, kernel_size=1, padding=0, bias=False) self.relu = ReLU(inplace=True) self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) self.sigmoid = Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ArashVahabpour/encoder4editing
SEModule
false
1975
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
from torch.nn import Module import torch from torch.nn import Conv2d from torch.nn import ReLU from torch.nn import Sigmoid from torch.nn import AdaptiveAvgPool2d class Model(Module): def __init__(self, channels, reduction): super().__init__() self.avg_pool = AdaptiveAvgPool2d(1) self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) self.relu = ReLU(inplace=True) self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) self.sigmoid = Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
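SEModule carries learnable weights, so comparing the two classes only makes sense after synchronizing parameters; both expose the same submodule names (fc1, fc2, and the parameter-free avg_pool, relu, sigmoid), so load_state_dict lines up. A minimal sketch, assuming a CUDA device and the init arguments from get_init_inputs (channels=4, reduction=4):

import torch

# Hypothetical check: copy the eager weights into the Triton-backed module
# before comparing, since each instance initializes its convolutions randomly.
x = torch.rand([4, 4, 4, 4], device='cuda')
eager = SEModule(channels=4, reduction=4).cuda()
compiled = SEModuleNew(channels=4, reduction=4).cuda()
compiled.load_state_dict(eager.state_dict())
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)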
EqualLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_2, buf1, 4, grid=grid(4), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), ( 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinearNew(nn.Module): def __init__(self, in_dim, out_dim, bias=True, 
bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def forward(self, input_0): primals_1 = self.weight primals_2 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ArashVahabpour/encoder4editing
EqualLinear
false
1976
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class Model(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
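One caveat for the record above: fused_leaky_relu calls a `fused` CUDA extension that the snippet never imports, so only the activation=None branch of EqualLinear.forward is runnable as written, and that is exactly the branch the compiled graph captures (weight * scale with scale = 1/sqrt(4) = 0.5, bias * lr_mul with lr_mul = 1, then a single addmm). A hedged check of that branch, assumptions as before:

import torch

# Hypothetical check of the activation=None path only; the leaky-ReLU
# branch depends on the external `fused` extension left undefined above.
x = torch.rand([4, 4, 4, 4], device='cuda')
eager = EqualLinear(4, 4).cuda()
compiled = EqualLinearNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-6)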
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/6o/c6ojhuuvyz43ogs3olib4lkalnymxhhnoqfyye667gq5hhhh5aqy.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, add, out], Original ATen: [aten.pow, aten.sum, aten.add, aten.div] # Source node to ATen node mapping: # add => add # norm => pow_2 # out => div # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {}) triton_poi_fused_add_div_pow_sum_0 = async_compile.triton('triton_poi_fused_add_div_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, add, out], Original ATen: [aten.pow, aten.sum, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): def __init__(self, power=2): super(NormalizeNew, self).__init__() self.power = power def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Archjbald/PoseStylizer
Normalize
false
1977
[ "BSD-3-Clause" ]
0
95aae02d1f4ac83536d91b8db5f78d12e7830f97
https://github.com/Archjbald/PoseStylizer/tree/95aae02d1f4ac83536d91b8db5f78d12e7830f97
import torch import torch.utils.data from torch import nn class Model(nn.Module): def __init__(self, power=2): super().__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
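For orientation, the fused kernel in this Normalize record is plain L2 normalization over dim 1 with an epsilon added to the norm. A minimal eager sketch of the same math (shapes follow get_inputs above):

import torch

x = torch.rand(4, 4, 4, 4)
norm = x.pow(2).sum(1, keepdim=True).pow(0.5)  # L2 norm over dim 1 (tmp12)
ref = x / (norm + 1e-07)                       # matches the kernel's tmp15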
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/j2/cj2qhnxmzdsgco22jtn4xpfh2ve5irgwuq3b2hzyjdh3jjjjqxpb.py # Topologically Sorted Source Nodes: [mean, std, sub, add, output, output_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mean => mean # output => div # output_1 => add_1 # std => sqrt, var # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_2), kwargs = {}) triton_poi_fused_add_div_mean_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + (x2), tmp29, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, output, output_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class LayerNorm(nn.Module): """ Simple 1D LayerNorm. """ def __init__(self, features, center=True, scale=False, eps=1e-06): super().__init__() self.center = center self.scale = scale self.eps = eps if self.scale: self.scale_param = nn.Parameter(torch.ones(features)) else: self.scale_param = None if self.center: self.center_param = nn.Parameter(torch.zeros(features)) else: self.center_param = None def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) output = (x - mean) / (std + self.eps) if self.scale: output = output * self.scale_param if self.center: output = output + self.center_param return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + x2, tmp29, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LayerNormNew(nn.Module): """ Simple 1D LayerNorm. """ def __init__(self, features, center=True, scale=False, eps=1e-06): super().__init__() self.center = center self.scale = scale self.eps = eps if self.scale: self.scale_param = nn.Parameter(torch.ones(features)) else: self.scale_param = None if self.center: self.center_param = nn.Parameter(torch.zeros(features)) else: self.center_param = None def forward(self, input_0): primals_2 = self.center_param primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
ArashVahabpour/sog-gail
LayerNorm
false
1978
[ "MIT" ]
0
90ebdc5a051a015f3b6c801d4b16307d2fbac0ae
https://github.com/ArashVahabpour/sog-gail/tree/90ebdc5a051a015f3b6c801d4b16307d2fbac0ae
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """ Simple 1D LayerNorm. """ def __init__(self, features, center=True, scale=False, eps=1e-06): super().__init__() self.center = center self.scale = scale self.eps = eps if self.scale: self.scale_param = nn.Parameter(torch.ones(features)) else: self.scale_param = None if self.center: self.center_param = nn.Parameter(torch.zeros(features)) else: self.center_param = None def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) output = (x - mean) / (std + self.eps) if self.scale: output = output * self.scale_param if self.center: output = output + self.center_param return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
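A detail worth flagging in this LayerNorm record: the kernel divides the summed squared deviations by 3.0, i.e. n - 1 for a last dimension of 4, so it reproduces the eager module's unbiased x.std(-1) rather than nn.LayerNorm's population variance. An eager sketch of the fused math (center_param is zero-initialized, so it is omitted here):

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)     # unbiased: divides by n - 1 = 3 (tmp22)
ref = (x - mean) / (std + 1e-06)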
FC
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/fl/cflw6zjzdk2wqtau7m6nsei5vavjfijzxhb37zaa3xp4yxpw5yb2.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/xl/cxlks5rvjsbzynadhfsy4kar6dwm54cn6srrf6h4nfgsmy6v4ps4.py # Topologically 
Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] # Source node to ATen node mapping: # out_1 => gt, mul_2, view_3, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_2), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_8, 0), kwargs = {}) triton_poi_fused_leaky_relu_leaky_relu_backward_view_1 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_view_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(out_ptr0 + (x4), tmp9, xmask) tl.store(out_ptr1 + (x4), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), 
stream=stream0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.leaky_relu, aten.view, aten.leaky_relu_backward] triton_poi_fused_leaky_relu_leaky_relu_backward_view_1.run(buf2, primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0) del buf2 del primals_1 return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class FC(nn.Module): def __init__(self, in_channels, out_channels, gain=2 ** 0.5, use_wscale =False, lrmul=1.0, bias=True): super(FC, self).__init__() he_std = gain * in_channels ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, x): if self.bias is not None: out = F.linear(x, self.weight * self.w_lrmul, self.bias * self. b_lrmul) else: out = F.linear(x, self.weight * self.w_lrmul) out = F.leaky_relu(out, 0.2, inplace=True) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_view_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(out_ptr0 + x4, tmp9, xmask) tl.store(out_ptr1 + x4, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_view_1[grid(256)](buf2, primals_1, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_1 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4 class FCNew(nn.Module): def __init__(self, in_channels, out_channels, gain=2 ** 0.5, use_wscale =False, lrmul=1.0, bias=True): super(FCNew, self).__init__() he_std = gain * in_channels ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, input_0): primals_2 = self.weight primals_1 = self.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Archjbald/PoseStylizer
FC
false
1979
[ "BSD-3-Clause" ]
0
95aae02d1f4ac83536d91b8db5f78d12e7830f97
https://github.com/Archjbald/PoseStylizer/tree/95aae02d1f4ac83536d91b8db5f78d12e7830f97
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_channels, out_channels, gain=2 ** 0.5, use_wscale =False, lrmul=1.0, bias=True): super().__init__() he_std = gain * in_channels ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, x): if self.bias is not None: out = F.linear(x, self.weight * self.w_lrmul, self.bias * self. b_lrmul) else: out = F.linear(x, self.weight * self.w_lrmul) out = F.leaky_relu(out, 0.2, inplace=True) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
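The triton_poi_fused_mul_0 kernel in this FC record multiplies the weight by 1.0 because, under the defaults captured by get_init_inputs (use_wscale=False, lrmul=1.0), the runtime scale w_lrmul collapses to 1 and the He scaling lives entirely in the init std. A sketch of that arithmetic:

import math

gain, in_channels, lrmul = math.sqrt(2), 4, 1.0
he_std = gain * in_channels ** -0.5  # He std, ~0.7071 here
init_std = he_std / lrmul            # std of the randn weight init (no wscale)
w_lrmul = lrmul                      # the 1.0 baked into the mul kernel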
ApplyStyle
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/yg/cygzi6evso6kefobgrwgjcxh5qgbn7zouyf7to742xeh6bsflplb.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ks/cks6zm6ohlls2psiyunvuei3bidcsj24j42if3qvdr23hdob3xea.py # Topologically 
Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul] # Source node to ATen node mapping: # add => add # mul_2 => mul_3 # x => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_1, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %select_3), kwargs = {}) triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 4 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1 + (8*x2) + (32*((x2 % 4) // 4)) + (128*(((4*((x2 // 4) % 4)) + (x2 % 4)) // 16))), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (4 + x1 + (8*x2) + (32*((x2 % 4) // 4)) + (128*(((4*((x2 // 4) % 4)) + (x2 % 4)) // 16))), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.2 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tmp11 = tmp10 + tmp3 tmp12 = tmp0 * tmp11 tmp15 = tmp14 * tmp3 tmp16 = tmp13 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tl.where(tmp17, tmp16, tmp18) tmp20 = tmp12 + tmp19 tl.store(out_ptr0 + (x3), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/k6/ck6hrjtdufvlbomtc3cbawgp7osoy4pwbhuveafdpa5lxgpyfg5o.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.leaky_relu_backward] # Source node to ATen node mapping: # Graph fragment: # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {}) 
triton_poi_fused_leaky_relu_backward_2 = async_compile.triton('triton_poi_fused_leaky_relu_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8, ), (1, )) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul] triton_poi_fused_add_mul_1.run(primals_4, buf1, primals_1, buf2, 4096, grid=grid(4096), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [aten.leaky_relu_backward] triton_poi_fused_leaky_relu_backward_2.run(buf1, primals_1, buf3, 512, grid=grid(512), stream=stream0) del buf1 del primals_1 return 
(buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class FC(nn.Module): def __init__(self, in_channels, out_channels, gain=2 ** 0.5, use_wscale =False, lrmul=1.0, bias=True): super(FC, self).__init__() he_std = gain * in_channels ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, x): if self.bias is not None: out = F.linear(x, self.weight * self.w_lrmul, self.bias * self. b_lrmul) else: out = F.linear(x, self.weight * self.w_lrmul) out = F.leaky_relu(out, 0.2, inplace=True) return out class ApplyStyle(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(ApplyStyle, self).__init__() self.linear = FC(latent_size, channels * 2, gain=1.0, use_wscale= use_wscale) def forward(self, x, latent): style = self.linear(latent) shape = [-1, 2, x.size(1), 1, 1] style = style.view(shape) x = x * (style[:, 0] + 1.0) + style[:, 1] return x def get_inputs(): return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2 + 32 * (x2 % 4 // 4) + 128 * ((4 * (x2 // 4 % 4) + x2 % 4) // 16)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (4 + x1 + 8 * x2 + 32 * (x2 % 4 // 4) + 128 * ((4 * (x2 // 4 % 4) + x2 % 4) // 16)), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last') tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 0.2 tmp9 = tmp5 * tmp8 tmp10 = tl.where(tmp7, tmp5, tmp9) tmp11 = tmp10 + tmp3 tmp12 = tmp0 * tmp11 tmp15 = tmp14 * tmp3 tmp16 = tmp13 + tmp15 tmp17 = tmp16 > tmp6 tmp18 = tmp16 * tmp8 tmp19 = tl.where(tmp17, tmp16, tmp18) tmp20 = tmp12 + tmp19 tl.store(out_ptr0 + x3, tmp20, None) @triton.jit def triton_poi_fused_leaky_relu_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = tmp9 > tmp5 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (8,), (1,)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1) del buf0 buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) triton_poi_fused_leaky_relu_backward_2[grid(512)](buf1, primals_1, buf3, 512, 
XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_1 return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf3 class FC(nn.Module): def __init__(self, in_channels, out_channels, gain=2 ** 0.5, use_wscale =False, lrmul=1.0, bias=True): super(FC, self).__init__() he_std = gain * in_channels ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, x): if self.bias is not None: out = F.linear(x, self.weight * self.w_lrmul, self.bias * self. b_lrmul) else: out = F.linear(x, self.weight * self.w_lrmul) out = F.leaky_relu(out, 0.2, inplace=True) return out class ApplyStyleNew(nn.Module): def __init__(self, latent_size, channels, use_wscale): super(ApplyStyleNew, self).__init__() self.linear = FC(latent_size, channels * 2, gain=1.0, use_wscale= use_wscale) def forward(self, input_0, input_1): primals_2 = self.linear.weight primals_1 = self.linear.bias primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Archjbald/PoseStylizer
ApplyStyle
false
1980
[ "BSD-3-Clause" ]
0
95aae02d1f4ac83536d91b8db5f78d12e7830f97
https://github.com/Archjbald/PoseStylizer/tree/95aae02d1f4ac83536d91b8db5f78d12e7830f97
import torch import torch.utils.data from torch import nn import torch.nn.functional as F class FC(nn.Module): def __init__(self, in_channels, out_channels, gain=2 ** 0.5, use_wscale =False, lrmul=1.0, bias=True): super().__init__() he_std = gain * in_channels ** -0.5 if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(out_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, x): if self.bias is not None: out = F.linear(x, self.weight * self.w_lrmul, self.bias * self. b_lrmul) else: out = F.linear(x, self.weight * self.w_lrmul) out = F.leaky_relu(out, 0.2, inplace=True) return out class Model(nn.Module): def __init__(self, latent_size, channels, use_wscale): super().__init__() self.linear = FC(latent_size, channels * 2, gain=1.0, use_wscale= use_wscale) def forward(self, x, latent): style = self.linear(latent) shape = [-1, 2, x.size(1), 1, 1] style = style.view(shape) x = x * (style[:, 0] + 1.0) + style[:, 1] return x def get_inputs(): return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 1.0]
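The modulation fused into triton_poi_fused_add_mul_1 is the usual AdaIN-style scale and shift: the FC output of 2*C channels is reshaped to [-1, 2, C, 1, 1], with style[:, 0] + 1 acting as the per-channel scale and style[:, 1] as the shift. An eager sketch with this record's shapes:

import torch

x = torch.rand(64, 4, 4, 4)         # feature map, C = 4
style = torch.rand(64, 8)           # FC(latent) output, out_channels = 2 * C
style = style.view(-1, 2, 4, 1, 1)  # per-channel (scale, shift) pairs
out = x * (style[:, 0] + 1.0) + style[:, 1]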
MergeLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu] # Source node to ATen node mapping: # h => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), 
(4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 return (buf3, buf0, buf2, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class MergeLayer(torch.nn.Module):
    """(dim1+dim2)->dim3->dim4"""

    def __init__(self, dim1, dim2, dim3, dim4):
        super().__init__()
        self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
        self.fc2 = torch.nn.Linear(dim3, dim4)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc1.weight)
        torch.nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, x1, x2):
        x = torch.cat([x1, x2], dim=1)
        h = self.act(self.fc1(x))
        return self.fc2(h)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim1': 4, 'dim2': 4, 'dim3': 4, 'dim4': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_6
    return buf3, buf0, buf2, primals_5


class MergeLayerNew(torch.nn.Module):
    """(dim1+dim2)->dim3->dim4"""

    def __init__(self, dim1, dim2, dim3, dim4):
        super().__init__()
        self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
        self.fc2 = torch.nn.Linear(dim3, dim4)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc1.weight)
        torch.nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, input_0, input_1):
        # Per the source-node comments in call(), the cat kernel consumes
        # primals_1/primals_2 as the two inputs and addmm consumes primals_5
        # as the fc2 weight, so the inputs must map to the first two slots.
        primals_1 = input_0
        primals_2 = input_1
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
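A minimal CPU sketch (illustrative only, not part of the repo) of the index arithmetic in triton_poi_fused_cat_0 above: each flat element index splits into a row x1 = i // 8 and a column x0 = i % 8, with columns below 4 read from the first input and the rest read from the second input at offset x0 - 4.

import torch

def cat_reference(a, b):
    # Mirrors the kernel's per-element select between the two inputs.
    out = torch.empty(4, 8)
    for i in range(32):
        x1, x0 = divmod(i, 8)
        out[x1, x0] = a[x1, x0] if x0 < 4 else b[x1, x0 - 4]
    return out

a, b = torch.rand(4, 4), torch.rand(4, 4)
assert torch.equal(cat_reference(a, b), torch.cat([a, b], dim=1))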
Aryn-VG/ASTGN-cDB
MergeLayer
false
1981
[ "MIT" ]
0
a9977736a361adac9a7b6f8bad2b5317651be36a
https://github.com/Aryn-VG/ASTGN-cDB/tree/a9977736a361adac9a7b6f8bad2b5317651be36a
import torch


class Model(torch.nn.Module):
    """(dim1+dim2)->dim3->dim4"""

    def __init__(self, dim1, dim2, dim3, dim4):
        super().__init__()
        self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
        self.fc2 = torch.nn.Linear(dim3, dim4)
        self.act = torch.nn.ReLU()
        torch.nn.init.xavier_normal_(self.fc1.weight)
        torch.nn.init.xavier_normal_(self.fc2.weight)

    def forward(self, x1, x2):
        x = torch.cat([x1, x2], dim=1)
        h = self.act(self.fc1(x))
        return self.fc2(h)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4, 4, 4]
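A hedged end-to-end sketch (assumes a CUDA device, since call() pins its buffers to cuda:0): with shared weights, MergeLayerNew should match the eager MergeLayer above up to floating-point noise.

import torch

eager = MergeLayer(4, 4, 4, 4).cuda()
fused = MergeLayerNew(4, 4, 4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # same weights on both paths
x1, x2 = torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda')
torch.testing.assert_close(fused(x1, x2), eager(x1, x2))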
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ret => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ret => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_2 del primals_3 return (buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: 'torch.Tensor'):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'normalized_shape': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_2
        del primals_3
    return buf2, primals_1


class LayerNormNew(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
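A hedged CPU reference (illustrative, not part of the repo) for the two-kernel split above: the first kernel emits the per-row mean and rsqrt(var + 1e-05) over the last dimension of size 4 (biased variance, correction=0), and the second applies the normalize-scale-shift.

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.mean(dim=-1, keepdim=True)
rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + 1e-05)
weight, bias = torch.rand(4), torch.rand(4)
ref = (x - mean) * rstd * weight + bias
torch.testing.assert_close(
    ref, torch.nn.functional.layer_norm(x, (4,), weight, bias, eps=1e-05))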
Artanic30/RentalPrediction
LayerNorm
false
1982
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
import torch
from torch import nn


class Model(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: 'torch.Tensor'):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
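A hedged usage sketch of the eager subclass above: the input is upcast to fp32 for the normalization and the result is cast back, so an fp16 input stays fp16.

import torch

ln = LayerNorm(4)
x = torch.rand(4, 4, 4, 4, dtype=torch.float16)
assert ln(x).dtype == torch.float16  # computed in fp32 internally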
MaskedCrossEntropyCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [lprobs], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # lprobs => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 
+ (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/d5/cd54homrt7sbya3jvabh6rff2je4vu3jgvmnjl34jkxapprxpjvn.py # Topologically Sorted Source Nodes: [setitem, gather, nll_loss], Original ATen: [aten.lift_fresh, aten.index_put, aten.gather, aten.neg] # Source node to ATen node mapping: # gather => gather # nll_loss => neg # setitem => full_default, index_put # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%arg1_1, [%eq], %full_default), kwargs = {}) # %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%view, -1, %unsqueeze_1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%gather,), kwargs = {}) triton_poi_fused_gather_index_put_lift_fresh_neg_1 = async_compile.triton('triton_poi_fused_gather_index_put_lift_fresh_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gather_index_put_lift_fresh_neg_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gather_index_put_lift_fresh_neg_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp11 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + 
(4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], -100, tl.int64) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp3, tmp0) tmp5 = tl.full([XBLOCK], 4, tl.int32) tmp6 = tmp4 + tmp5 tmp7 = tmp4 < 0 tmp8 = tl.where(tmp7, tmp6, tmp4) tl.device_assert(((0 <= tmp8) & (tmp8 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp8 < 4") tmp10 = tl.load(in_ptr1 + (tmp8 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl_math.exp(tmp11) tmp14 = tl_math.exp(tmp13) tmp15 = tmp12 + tmp14 tmp17 = tl_math.exp(tmp16) tmp18 = tmp15 + tmp17 tmp20 = tl_math.exp(tmp19) tmp21 = tmp18 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp10 - tmp22 tmp24 = -tmp23 tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp24, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lprobs], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [setitem, gather, nll_loss], Original ATen: [aten.lift_fresh, aten.index_put, aten.gather, aten.neg] triton_poi_fused_gather_index_put_lift_fresh_neg_1.run(arg1_1, buf0, arg1_1, buf3, 4, grid=grid(4), stream=stream0) del arg1_1 del buf0 return (reinterpret_tensor(buf3, (4, ), (1, ), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn.modules.loss import _WeightedLoss


class MaskedCrossEntropyCriterion(_WeightedLoss):

    def __init__(self, ignore_index=[-100], reduce=None):
        super(MaskedCrossEntropyCriterion, self).__init__()
        self.padding_idx = ignore_index
        self.reduce = reduce

    def forward(self, outputs, targets):
        lprobs = nn.functional.log_softmax(outputs, dim=-1)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        for idx in self.padding_idx:
            targets[targets == idx] = 0
        nll_loss = -lprobs.gather(dim=-1, index=targets.unsqueeze(1))
        if self.reduce:
            nll_loss = nll_loss.sum()
        return nll_loss.squeeze()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _WeightedLoss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_gather_index_put_lift_fresh_neg_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([1], -100, tl.int64)
    tmp2 = tmp0 == tmp1
    tmp3 = tl.full([1], 0, tl.int64)
    tmp4 = tl.where(tmp2, tmp3, tmp0)
    tmp5 = tl.full([XBLOCK], 4, tl.int32)
    tmp6 = tmp4 + tmp5
    tmp7 = tmp4 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp4)
    tl.device_assert((0 <= tmp8) & (tmp8 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp8 < 4')
    tmp10 = tl.load(in_ptr1 + (tmp8 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl_math.exp(tmp11)
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp12 + tmp14
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp15 + tmp17
    tmp20 = tl_math.exp(tmp19)
    tmp21 = tmp18 + tmp20
    tmp22 = tl_math.log(tmp21)
    tmp23 = tmp10 - tmp22
    tmp24 = -tmp23
    tl.store(out_ptr0 + x0, tmp4, xmask)
    tl.store(out_ptr1 + x0, tmp24, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        triton_poi_fused_gather_index_put_lift_fresh_neg_1[grid(4)](arg1_1, buf0, arg1_1, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1)
        del arg1_1
        del buf0
    return reinterpret_tensor(buf3, (4,), (1,), 0),


class MaskedCrossEntropyCriterionNew(_WeightedLoss):

    def __init__(self, ignore_index=[-100], reduce=None):
        super(MaskedCrossEntropyCriterionNew, self).__init__()
        self.padding_idx = ignore_index
        self.reduce = reduce

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
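A hedged CPU reference (illustrative) for the second kernel above: given the max-shifted logits from kernel 0, it remaps ignore_index (-100) targets to 0, gathers the shifted logit at the target, and subtracts log(sum(exp(...))) to finish the log_softmax before negating.

import torch

shifted = torch.rand(4, 4)  # stands in for kernel 0's output (logits minus row max)
targets = torch.tensor([0, 1, -100, 3])
t = torch.where(targets == -100, torch.zeros_like(targets), targets)
nll = shifted.exp().sum(dim=1).log() - shifted.gather(1, t.unsqueeze(1)).squeeze(1)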
ArkanDH/Team5-Inverse-Cooking-Stuff
MaskedCrossEntropyCriterion
false
1983
[ "MIT" ]
0
ec224918b25fb7a04aa09995e4d11804448df7dd
https://github.com/ArkanDH/Team5-Inverse-Cooking-Stuff/tree/ec224918b25fb7a04aa09995e4d11804448df7dd
import torch
import torch.nn as nn
from torch.nn.modules.loss import _WeightedLoss


class Model(_WeightedLoss):

    def __init__(self, ignore_index=[-100], reduce=None):
        super().__init__()
        self.padding_idx = ignore_index
        self.reduce = reduce

    def forward(self, outputs, targets):
        lprobs = nn.functional.log_softmax(outputs, dim=-1)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        for idx in self.padding_idx:
            targets[targets == idx] = 0
        nll_loss = -lprobs.gather(dim=-1, index=targets.unsqueeze(1))
        if self.reduce:
            nll_loss = nll_loss.sum()
        return nll_loss.squeeze()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return []
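A hedged usage sketch of the eager criterion; note that forward remaps ignore_index entries of targets to 0 in place, so pass a clone if the tensor is reused elsewhere.

import torch

crit = MaskedCrossEntropyCriterion(ignore_index=[-100])
outputs = torch.rand(4, 1, 4)            # flattened to (4, 4) inside forward
targets = torch.tensor([0, 1, -100, 3])
loss = crit(outputs, targets.clone())    # per-element NLL, shape (4,)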
PrecomputedNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/yi/cyiakmznzllqklcnfmh5hu3smacswbrh4drlxafkndcx6god674a.py # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # sub => sub # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 4), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 4), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = tmp0 - tmp1 tmp3 = 0.25 tmp4 = tmp2 * tmp3 
tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PrecomputedNorm(nn.Module):
    """Normalization using Pre-computed Mean/Std.

    Args:
        stats: Precomputed (mean, std).
        axis: Axis setting used to calculate mean/variance.
    """

    def __init__(self, stats, axis=[1, 2]):
        super().__init__()
        self.axis = axis
        self.mean, self.std = stats

    def forward(self, X: 'torch.Tensor') ->torch.Tensor:
        return (X - self.mean) / self.std

    def __repr__(self):
        format_string = (self.__class__.__name__ +
            f'(mean={self.mean}, std={self.std}, axis={self.axis})')
        return format_string


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'stats': [4, 4]}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 - tmp1
    tmp3 = 0.25
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class PrecomputedNormNew(nn.Module):
    """Normalization using Pre-computed Mean/Std.

    Args:
        stats: Precomputed (mean, std).
        axis: Axis setting used to calculate mean/variance.
    """

    def __init__(self, stats, axis=[1, 2]):
        super().__init__()
        self.axis = axis
        self.mean, self.std = stats

    def __repr__(self):
        format_string = (self.__class__.__name__ +
            f'(mean={self.mean}, std={self.std}, axis={self.axis})')
        return format_string

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
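A hedged smoke test (assumes a CUDA device; call refers to the function defined just above): division by std=4 is compiled to multiplication by the reciprocal 0.25, which is exact for powers of two, so the two forms agree bitwise.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(call([x.clone()])[0], (x - 4.0) / 4.0)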
AyushExel/s3prl
PrecomputedNorm
false
1,984
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch
import torch.nn as nn


class Model(nn.Module):
    """Normalization using Pre-computed Mean/Std.

    Args:
        stats: Precomputed (mean, std).
        axis: Axis setting used to calculate mean/variance.
    """

    def __init__(self, stats, axis=[1, 2]):
        super().__init__()
        self.axis = axis
        self.mean, self.std = stats

    def forward(self, X: 'torch.Tensor') ->torch.Tensor:
        return (X - self.mean) / self.std

    def __repr__(self):
        format_string = (self.__class__.__name__ +
            f'(mean={self.mean}, std={self.std}, axis={self.axis})')
        return format_string


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
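A hedged usage sketch of the eager module; with stats = (4, 4) the forward reduces to (x - 4) / 4.

import torch

norm = PrecomputedNorm(stats=[4, 4])
x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(norm(x), (x - 4) / 4)
print(norm)  # PrecomputedNorm(mean=4, std=4, axis=[1, 2])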
AttentivePooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/uu/cuuixbc7clu3x4xnld3clwlzm4bwe3sea4shtlguimyhwbrzdnjg.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = 
tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp8 = tmp7 + tmp3 tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 + tmp3 tmp14 = tmp11 + tmp13 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 + tmp3 tmp19 = tmp16 + tmp18 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x2), tmp20, xmask) tl.store(out_ptr1 + (x2), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/26/c26r27cnuaxawyutnmstc5xa4iiskv2zoymvaaom3awrmuaav47p.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => div, exp, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %squeeze), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_add_2 = async_compile.triton('triton_poi_fused__softmax_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = 
tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2q/c2qalxfq2yxll6mbnxojhbih4eltcgaqrxucbiibajluczsoe5lz.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_6, buf2, primals_5, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_2.run(primals_6, buf2, primals_5, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del buf4 del primals_5 del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(primals_3, buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf5, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp8 = tmp7 + tmp3 tmp9 = tmp6 + tmp8 tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 + tmp3 tmp14 = tmp11 + tmp13 tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 + tmp3 tmp19 = tmp16 + tmp18 tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x1 = xindex // 4 % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_6, buf2, primals_5, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_2[grid(256)](primals_6, buf2, primals_5, buf3, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del buf4 del primals_5 del primals_6 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_3[grid(256)](primals_3, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0 ), primals_3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0 ), buf5, primals_4, buf7 class AttentivePoolingNew(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePoolingNew, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, input_0, input_1): primals_1 = self.W_a.weight primals_2 = self.W_a.bias primals_4 = self.W.weight primals_5 = self.W.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6]) return output[0], output[1]
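# A minimal sketch (not part of the record) of checking the compiled wrapper
# against the eager module. It assumes AttentivePooling, AttentivePoolingNew,
# and get_inputs from the blocks above are importable together, and that a
# CUDA device is available (call() is CUDA-only).
import torch

if torch.cuda.is_available():
    eager = AttentivePooling(input_dim=4).cuda()
    compiled = AttentivePoolingNew(input_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())   # same parameter names in both
    batch_rep, att_mask = [t.cuda() for t in get_inputs()]
    ref_rep, ref_w = eager(batch_rep, att_mask)
    new_rep, new_w = compiled(batch_rep, att_mask)
    assert torch.allclose(ref_rep, new_rep, atol=1e-5)
    assert torch.allclose(ref_w, new_w, atol=1e-5)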
AyushExel/s3prl
AttentivePooling
false
1985
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn class Model(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super().__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
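# A minimal sketch (an assumption about intended usage, not from the record)
# of consuming the harness helpers above: get_init_inputs() supplies the
# constructor arguments and get_inputs() the forward arguments.
model = Model(*get_init_inputs())        # Model(4), i.e. input_dim = 4
utter_rep, att_w = model(*get_inputs())
print(utter_rep.shape, att_w.shape)      # (4, 4, 4, 4) and (4, 4, 4, 4, 1)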
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/on/conl6eemb3vyjzkllydlouehrcxphkzifo5kmslz6fgiz6ixsw5h.py # Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_2 => mul_2 # weight => mul_3 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/go/cgoav6av4bzem4wmdmkiowlmjpeiubwc67bqu6es4aivwlfpxzhh.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] # Source node to ATen node mapping: # out_3 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, 
)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf5, primals_6, 192, grid=grid(192), stream=stream0) del primals_6 return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 3, 4, 1, 1), (12, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
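# A minimal sketch (not part of the record) of the batch-to-groups trick
# visible in call() above: the (4, 4, 4, 4) input is viewed as (1, 16, 4, 4)
# and convolved with groups=4, so a single grouped convolution applies a
# different filter bank to every sample in the batch.
import torch
import torch.nn.functional as F

batch, in_ch, out_ch, h, w = 4, 4, 3, 4, 4
x = torch.randn(batch, in_ch, h, w)
weight = torch.randn(batch, out_ch, in_ch, 1, 1)   # one 1x1 filter bank per sample

# Reference: one conv2d per sample.
ref = torch.cat([F.conv2d(x[i:i + 1], weight[i]) for i in range(batch)], dim=0)

# Grouped form, as in the compiled call(): fold the batch into channels/groups.
out = F.conv2d(x.reshape(1, batch * in_ch, h, w),
               weight.reshape(batch * out_ch, in_ch, 1, 1),
               groups=batch).reshape(batch, out_ch, h, w)

assert torch.allclose(ref, out, atol=1e-6)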
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGB(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input, style, skip=None): out = self.conv(input, style) out = out + self.bias if skip is not None: skip = self.upsample(skip) out = out + skip return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'style_dim': 4}]
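# A minimal sketch (not part of the record) of the modulation/demodulation
# step in ModulatedConv2d.forward above, shown with demodulate=True for
# illustration (ToRGB itself constructs the layer with demodulate=False).
import math
import torch

batch, in_ch, out_ch, k = 4, 4, 3, 1
base_weight = torch.randn(1, out_ch, in_ch, k, k)   # self.weight
style = torch.randn(batch, 1, in_ch, 1, 1)          # self.modulation(style), reshaped
scale = 1 / math.sqrt(in_ch * k ** 2)               # fan-in scaling

weight = scale * base_weight * style                # (B, C_out, C_in, k, k)
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, out_ch, 1, 1, 1)
# After demodulation every per-sample filter has unit L2 norm (up to eps):
print(weight.pow(2).sum([2, 3, 4]).sqrt())          # ~1.0 everywhere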
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.autograd import Function import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 3, 4, 1, 1), (12, 4, 1, 1, 1)) assert_size_stride(primals_6, (1, 3, 1, 1), (3, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 buf3 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_2[grid(48)](primals_5, buf2, buf3, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf4, (1, 12, 4, 4), (192, 16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf4 triton_poi_fused_add_3[grid(192)](buf5, primals_6, 192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (12, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, 
down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( 
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out class Upsample(nn.Module): def __init__(self, kernel, factor=2): super().__init__() self.factor = factor kernel = make_kernel(kernel) * factor ** 2 self.register_buffer('kernel', kernel) p = kernel.shape[0] - factor pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 self.pad = pad0, pad1 def forward(self, input): out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad= self.pad) return out class ToRGBNew(nn.Module): def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): super().__init__() if upsample: self.upsample = Upsample(blur_kernel) self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate =False) self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) def forward(self, input_0, input_1): primals_6 = self.bias primals_5 = self.conv.weight primals_2 = self.conv.modulation.weight primals_3 = self.conv.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
ArashVahabpour/encoder4editing
ToRGB
false
1986
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_ # ... truncated (>4000 chars) for memory efficiency
SoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK 
xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits_BxSpn], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf3, buf1, primals_4, 1, 256, grid=grid(1), stream=stream0) del buf1 return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SoftmaxLoss(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super(SoftmaxLoss, self).__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, x_BxH, labels_B): """ x shape: (B, H) labels shape: (B) """ logits_BxSpn = self.fc(x_BxH) loss = self.loss(logits_BxSpn, labels_B) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4, 'speaker_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3, buf1, 
primals_4, 1, 256, num_warps=2, num_stages=1) del buf1 return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class SoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super(SoftmaxLossNew, self).__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
AyushExel/s3prl
SoftmaxLoss
false
1987
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, hidden_dim, speaker_num, **kwargs): """ Softmax Loss """ super().__init__() self.fc = nn.Linear(hidden_dim, speaker_num) self.loss = nn.CrossEntropyLoss() def forward(self, x_BxH, labels_B): """ x shape: (B, H) labels shape: (B) """ logits_BxSpn = self.fc(x_BxH) loss = self.loss(logits_BxSpn, labels_B) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
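Note: the two fused kernels in this entry reproduce nn.CrossEntropyLoss with dense probability targets: classes run along dim 1, and the mean is taken over the remaining 4*4*4 = 64 positions (the 0.015625 constant is 1/64). Below is a minimal eager-mode equivalence sketch, assuming the (4, 4, 4, 4) tensors from get_inputs() and a PyTorch version with probability-target support; all names are illustrative, not part of the entry.

import torch
import torch.nn as nn

# Hedged sketch: kernel 0 subtracts the per-position max over dim 1;
# kernel 1 finishes the log-softmax, multiplies by the probability
# targets, sums, negates, and scales by 1/64 (0.015625 in the kernel).
torch.manual_seed(0)
x = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
fc = nn.Linear(4, 4)

logits = fc(x)  # the addmm in call()
eager = nn.CrossEntropyLoss()(logits, target)
log_probs = logits - torch.logsumexp(logits, dim=1, keepdim=True)
manual = -(log_probs * target).sum() / 64
assert torch.allclose(eager, manual, atol=1e-06)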
ScaledL2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/2a/c2ardebf3dijah3sug56cdg3pdrqptfnjdozbx2wvqruxlwmuixz.py # Topologically Sorted Source Nodes: [normalize, mul], Original ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # mul => mul # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %unsqueeze_2), kwargs = {}) triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) 
% 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize, mul], Original ATen: [aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.onnx import torch import torch.nn as nn import torch.nn.functional as F class ScaledL2Norm(nn.Module): def __init__(self, in_channels, initial_scale): super(ScaledL2Norm, self).__init__() self.in_channels = in_channels self.scale = nn.Parameter(torch.Tensor(in_channels)) self.initial_scale = initial_scale self.reset_parameters() def forward(self, x): return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2 ).unsqueeze(3) def reset_parameters(self): self.scale.data.fill_(self.initial_scale) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'initial_scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.onnx import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf0, primals_1 class ScaledL2NormNew(nn.Module): def __init__(self, in_channels, initial_scale): super(ScaledL2NormNew, self).__init__() self.in_channels = in_channels self.scale = nn.Parameter(torch.Tensor(in_channels)) self.initial_scale = initial_scale self.reset_parameters() def reset_parameters(self): self.scale.data.fill_(self.initial_scale) def forward(self, input_0): primals_2 = self.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
AndySer37/pytorch-ssd-mobile
ScaledL2Norm
false
1988
[ "MIT" ]
0
ec4935940ffa374edc1e9a7009c279e727e548d7
https://github.com/AndySer37/pytorch-ssd-mobile/tree/ec4935940ffa374edc1e9a7009c279e727e548d7
import torch import torch.onnx import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_channels, initial_scale): super().__init__() self.in_channels = in_channels self.scale = nn.Parameter(torch.Tensor(in_channels)) self.initial_scale = initial_scale self.reset_parameters() def forward(self, x): return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2 ).unsqueeze(3) def reset_parameters(self): self.scale.data.fill_(self.initial_scale) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 1.0]
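Note: triton_poi_fused_div_mul_0 above fuses the channel-wise L2 norm (the four loads at stride 16), the 1e-12 clamp that F.normalize applies by default, the division, and the broadcast multiply by the per-channel scale. A short eager sketch of the same computation, assuming the (4, 4, 4, 4) input from get_inputs(); names are illustrative:

import torch
import torch.nn.functional as F

# Hedged sketch of the fused kernel's math: L2 norm over dim=1, clamped
# at eps=1e-12 (the triton_helpers.maximum in the kernel), then scaled.
x = torch.rand(4, 4, 4, 4)
scale = torch.ones(4)  # initial_scale=1.0 from get_init_inputs()

norm = x.pow(2).sum(dim=1, keepdim=True).sqrt().clamp_min(1e-12)
manual = x / norm * scale.view(1, 4, 1, 1)
eager = F.normalize(x, p=2, dim=1) * scale.view(1, 4, 1, 1)
assert torch.allclose(manual, eager)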
ChannelNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/pd/cpdkd7oy3g2qlxint6tsd7foiux4zykryk7wd4pgiuuqhuclbvui.py # Topologically Sorted Source Nodes: [cumMean, cumVar, sub, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul] # Source node to ATen node mapping: # add => add # cumMean => mean # cumVar => var # mul_1 => mul_1 # rsqrt => rsqrt # sub => sub # x => mul # x_1 => add_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1, keepdim: True}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_add_mean_mul_rsqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x3 = (xindex // 64) x5 = xindex % 16 x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp10 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x4), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cumMean, cumVar, sub, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ChannelNorm(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super(ChannelNorm, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, x): cumMean = x.mean(dim=1, keepdim=True) cumVar = x.var(dim=1, keepdim=True) x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon) if self.weight is not None: x = x * self.weight + self.bias return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'numFeatures': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x3 = xindex // 64 x5 = xindex % 16 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp10 * tmp26 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class ChannelNormNew(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super(ChannelNormNew, self).__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
ChannelNorm
false
1989
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, numFeatures, epsilon=1e-05, affine=True): super().__init__() if affine: self.weight = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1)) else: self.weight = None self.bias = None self.epsilon = epsilon self.p = 0 self.affine = affine self.reset_parameters() def reset_parameters(self): if self.affine: torch.nn.init.ones_(self.weight) torch.nn.init.zeros_(self.bias) def forward(self, x): cumMean = x.mean(dim=1, keepdim=True) cumVar = x.var(dim=1, keepdim=True) x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon) if self.weight is not None: x = x * self.weight + self.bias return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
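Note: the single fused kernel computes the channel mean, the unbiased variance (the 3.0 divisor is numFeatures - 1), the rsqrt normalization, and the affine step in one pass. Because the (1, 4, 1) weight broadcasts against the trailing dimensions of the 4D test input, the kernel indexes it with xindex // 4 % 4, i.e. along dim 2 rather than the channel dim. A hedged eager sketch, with illustrative names:

import torch

# Sketch of the fused computation for the (4, 4, 4, 4) test input.
x = torch.rand(4, 4, 4, 4)
weight = torch.ones(1, 4, 1)
bias = torch.zeros(1, 4, 1)

mean = x.mean(dim=1, keepdim=True)
var = x.var(dim=1, keepdim=True)            # unbiased: divides by 4 - 1 = 3
out = (x - mean) * torch.rsqrt(var + 1e-05)
out = out * weight + bias                   # (1, 4, 1) broadcasts over dim 2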
ModulatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ri/criuvsdl3sferb4bb6ci5zaps3wys7xxcpybz7vfo2ba4q7cuq6c.py # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # demod => rsqrt # mul_2 => mul_2 # pow_1 => pow_1 # sum_1 => sum_1 # weight => mul_3 # weight_1 => mul_4 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = (rindex // 16) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 
buf3 = buf0; del buf0 # reuse buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return (reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. 
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape style = self.modulation(style).view(batch, 1, in_channel, 1, 1) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self. 
kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self. kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function import math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 
0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size 
ctx.out_size = out_size return grad_input @staticmethod def backward(ctx, gradgrad_input): kernel, = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx. in_size[3], 1) gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx. up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1, ctx.pad_y0, ctx.pad_y1) gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) return gradgrad_out, None, None, None, None, None, None, None, None class UpFirDn2d(Function): @staticmethod def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _batch, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = out_h, out_w ctx.up = up_x, up_y ctx.down = down_x, down_y ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1 g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) out = out.view(-1, channel, out_h, out_w) return out @staticmethod def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply(grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size) return grad_input, None, None, None, None class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = 1 / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_3 = self.modulation.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return 
output[0]
ArashVahabpour/encoder4editing
ModulatedConv2d
false
1990
[ "MIT" ]
0
819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
https://github.com/ArashVahabpour/encoder4editing/tree/819b90ecd7397fbe2ab7cb30eb451dab0f3149fd
from torch.autograd import Function import math import torch from torch import nn import torch.nn.functional as F def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if k.ndim == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out class FusedLeakyReLUFunctionBackward(Function): @staticmethod def forward(ctx, grad_output, out, negative_slope, scale): ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale empty = grad_output.new_empty(0) grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale) dim = [0] if grad_input.ndim > 2: dim += list(range(2, grad_input.ndim)) grad_bias = grad_input.sum(dim).detach() return grad_input, grad_bias @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): out, = ctx.saved_tensors gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale) return gradgrad_out, None, None, None class FusedLeakyReLUFunction(Function): @staticmethod def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out @staticmethod def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = 1 / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class UpFirDn2dBackward(Function): @staticmethod def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): up_x, up_y = up down_x, down_y = down g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel, down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) ctx.save_for_backward(kernel) pad_x0, pad_x1, pad_y0, pad_y1 = pad ctx.up_x = up_x ctx.up_y = up_y ctx.down_x = down_x ctx.down_y = down_y ctx.pad_x0 = pad_x0 ctx.pad_x1 = pad_x1 ctx.pad_y0 = pad_y0 ctx.pad_y1 = pad_y1 ctx.in_size = in_size ctx.out_size = out_ # ... truncated (>4000 chars) for memory efficiency
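The ModulatedConv2d entry above truncates before the forward math, so for orientation here is a minimal eager sketch of the standard StyleGAN2 modulate/demodulate step that this module implements; the shapes and names are ours, the eps matches the module's self.eps, and the batch-into-groups reshape mirrors the grouped convolution visible in call(). This is an illustrative re-derivation, not the compiled path.

import torch
import torch.nn.functional as F

B, Cin, Cout, k = 4, 4, 4, 3                      # hypothetical toy shapes
weight = torch.randn(1, Cout, Cin, k, k)          # shared base weight, as in the module
style = torch.randn(B, Cin)                       # per-sample output of the EqualLinear modulation

w = weight * style.view(B, 1, Cin, 1, 1)          # modulate input channels per sample
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-08)  # per-(sample, out-channel) norm; eps = self.eps
w = w * demod.view(B, Cout, 1, 1, 1)              # demodulate

x = torch.randn(B, Cin, 4, 4)
out = F.conv2d(x.view(1, B * Cin, 4, 4),          # fold the batch into channels...
               w.view(B * Cout, Cin, k, k),
               padding=k // 2, groups=B)          # ...and convolve with groups = batch
out = out.view(B, Cout, 4, 4)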
AMSoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py # Topologically Sorted Source Nodes: [x_BxH], Original ATen: [aten.div] # Source node to ATen node mapping: # x_BxH => div_1 # Graph fragment: # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand_1), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py # Topologically Sorted Source Nodes: [W], Original ATen: [aten.div] # Source node to ATen node mapping: # W => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %expand), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/zq/czqoy42ufye2afnopzmn2loqzazgyudvxxmzemheojq4scu6j5lz.py # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] # Source node to ATen node mapping: # numerator => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%diagonal, 0.4), kwargs = {}) # %mul : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 30.0), kwargs = {}) triton_poi_fused_mul_sub_2 = async_compile.triton('triton_poi_fused_mul_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_BxH], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [W], Original ATen: [aten.div] triton_poi_fused_div_1.run(primals_3, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [W, wf], Original ATen: [aten.div, aten.mm] extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] triton_poi_fused_mul_sub_2.run(primals_2, buf2, buf3, 4, grid=grid(4), stream=stream0) return (buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): 
from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
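Reading the compiled graph above: triton_poi_fused_div_0 L2-normalizes the rows of the input, triton_poi_fused_div_1 L2-normalizes the columns of W (both clamp the norm at 1e-12), and triton_poi_fused_mul_sub_2 gathers the target logit and applies s*(logit - m). A minimal eager sketch of the same pipeline, with our toy shapes:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4)
W = torch.rand(4, 4)
labels = torch.ones(4, dtype=torch.int64)

x_n = F.normalize(x, dim=1, eps=1e-12)     # triton_poi_fused_div_0 (row norms, clamped at 1e-12)
W_n = F.normalize(W, dim=0, eps=1e-12)     # triton_poi_fused_div_1 (column norms)
wf = x_n @ W_n                             # extern_kernels.mm -> buf2
numerator = 30.0 * (wf[torch.arange(4), labels] - 0.4)  # triton_poi_fused_mul_sub_2 -> buf3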
import torch import torch.nn as nn import torch.nn.functional as F class AMSoftmaxLoss(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLoss, self).__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, x_BxH, labels_B): """ x shape: (B, H) labels shape: (B) """ assert len(x_BxH) == len(labels_B) assert torch.min(labels_B) >= 0 assert torch.max(labels_B) < self.speaker_num W = F.normalize(self.W, dim=0) x_BxH = F.normalize(x_BxH, dim=1) wf = torch.mm(x_BxH, W) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels_B]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels_B)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'hidden_dim': 4, 'speaker_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_3, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
extern_kernels.mm(buf0, buf1, out=buf2) del buf1 buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_sub_2[grid(4)](primals_2, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1) return buf3, buf2, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4 ), (1, 4), 0) class AMSoftmaxLossNew(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super(AMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, input_0, input_1): primals_1 = self.W primals_3 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
AMSoftmaxLoss
false
1991
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, hidden_dim, speaker_num, s=30.0, m=0.4, **kwargs): """ AM Softmax Loss """ super().__init__() self.s = s self.m = m self.speaker_num = speaker_num self.W = torch.nn.Parameter(torch.randn(hidden_dim, speaker_num), requires_grad=True) nn.init.xavier_normal_(self.W, gain=1) def forward(self, x_BxH, labels_B): """ x shape: (B, H) labels shape: (B) """ assert len(x_BxH) == len(labels_B) assert torch.min(labels_B) >= 0 assert torch.max(labels_B) < self.speaker_num W = F.normalize(self.W, dim=0) x_BxH = F.normalize(x_BxH, dim=1) wf = torch.mm(x_BxH, W) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels_B]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels_B)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [4, 4]
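The loss assembled in forward above is, algebraically, ordinary cross-entropy over cosine logits with the margin subtracted from the target column and everything scaled by s. A small sanity sketch of that equivalence (our construction, toy shapes from get_inputs()):

import torch
import torch.nn.functional as F

s, m = 30.0, 0.4
wf = torch.rand(4, 4)                       # cosine similarities x_BxH @ W
labels = torch.ones(4, dtype=torch.int64)

logits = s * wf
logits[torch.arange(4), labels] = s * (wf[torch.arange(4), labels] - m)
loss = F.cross_entropy(logits, labels)      # equals -torch.mean(L) computed above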
AdMSoftmaxLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] # Source node to ATen node mapping: # x => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand_1), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/rd/crdggc56dw4i2v6iocdkenrwijoptevzhgfj4622ke7pcflpdwxy.py # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] # Source node to ATen node mapping: # numerator => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%diagonal, 0.4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 30.0), kwargs = {}) triton_poi_fused_mul_sub_1 = async_compile.triton('triton_poi_fused_mul_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (x0 + (16*tmp4) + (64*x1)), xmask) tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wf], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [sub, numerator], Original ATen: [aten.sub, aten.mul] triton_poi_fused_mul_sub_1.run(primals_2, buf1, buf2, 64, grid=grid(64), stream=stream0) return (buf2, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class AdMSoftmaxLoss(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLoss, self).__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, x, labels): """ input shape (N, in_features) """ assert len(x) == len(labels) assert torch.min(labels) >= 0 assert torch.max(labels) < self.out_features for W in self.fc.parameters(): W = F.normalize(W, dim=1) x = F.normalize(x, dim=1) wf = self.fc(x) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (x0 + 16 * tmp4 + 64 * x1), xmask) tmp7 = 0.4 tmp8 = tmp6 - tmp7 tmp9 = 30.0 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32) triton_poi_fused_mul_sub_1[grid(64)](primals_2, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0) class AdMSoftmaxLossNew(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super(AdMSoftmaxLossNew, self).__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, input_0, input_1): primals_3 = self.fc.weight primals_1 = input_0 primals_2 = input_1 output = 
call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
AdMSoftmaxLoss
false
1992
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_features, out_features, s=30.0, m=0.4): """ AM Softmax Loss """ super().__init__() self.s = s self.m = m self.in_features = in_features self.out_features = out_features self.fc = nn.Linear(in_features, out_features, bias=False) def forward(self, x, labels): """ input shape (N, in_features) """ assert len(x) == len(labels) assert torch.min(labels) >= 0 assert torch.max(labels) < self.out_features for W in self.fc.parameters(): W = F.normalize(W, dim=1) x = F.normalize(x, dim=1) wf = self.fc(x) numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]) - self.m) excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0 ) for i, y in enumerate(labels)], dim=0) denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s * excl), dim=1) L = numerator - torch.log(denominator) return -torch.mean(L) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)] def get_init_inputs(): return [4, 4]
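One caveat worth flagging in this entry: the loop `for W in self.fc.parameters(): W = F.normalize(W, dim=1)` only rebinds a local name, so fc.weight is never actually normalized, and the compiled call() confirms this by feeding primals_3 (the raw weight) straight into the mm. A minimal sketch of the presumably intended behavior, written as a standalone helper (our code, not the repo's):

import torch
import torch.nn.functional as F

def cosine_logits(x, fc_weight):
    # Normalize both operands functionally instead of rebinding a loop variable.
    x = F.normalize(x, dim=1)
    w = F.normalize(fc_weight, dim=1)       # rows of the nn.Linear weight
    return F.linear(x, w)                   # true cosine similarities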
Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hh/chh6c5w5qa6uf7vojzls7kg4by5riqn4sgtlt67ukhrqv4nd6zcl.py # Topologically Sorted Source Nodes: [pooled], Original ATen: [aten.mean] # Source node to ATen node mapping: # pooled => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1]), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) 
tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pooled], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [predicted], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return (reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(Model, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, features): pooled = features.mean(dim=1) predicted = self.linear(pooled) return predicted def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_class_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 return reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0) class ModelNew(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(ModelNew, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AyushExel/s3prl
Model
false
1993
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim, output_class_num, **kwargs): super(Model, self).__init__() self.linear = nn.Linear(input_dim, output_class_num) def forward(self, features): pooled = features.mean(dim=1) predicted = self.linear(pooled) return predicted def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
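As a quick check on the pooling kernel in this entry: triton_poi_fused_mean_0 emits one output per (batch, h, w) triple, and its four strided loads at x0 + {0, 16, 32, 48} + 64*x1 walk dim=1 of the contiguous (4, 4, 4, 4) input. A small verification sketch of that index math (our construction):

import torch

x = torch.rand(4, 4, 4, 4)
flat = x.reshape(-1)
ref = x.mean(dim=1).reshape(-1)
manual = torch.stack([
    sum(flat[x0 + 16 * k + 64 * x1] for k in range(4)) / 4.0
    for x1 in range(4) for x0 in range(16)   # x1 = batch index, x0 = 4*h + w
])
assert torch.allclose(manual, ref)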
OutConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn as nn class OutConv(nn.Module): def __init__(self, in_channels, out_channels): super(OutConv, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class OutConvNew(nn.Module): def __init__(self, in_channels, out_channels): super(OutConvNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AzmHmd/RMS
OutConv
false
1994
[ "MIT" ]
0
61d108e118d1e06de324644ebd8d92fc1b091b91
https://github.com/AzmHmd/RMS/tree/61d108e118d1e06de324644ebd8d92fc1b091b91
import torch import torch.utils.data import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) def forward(self, x): return self.conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
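Nothing exotic happens in this entry: a kernel_size=1 convolution is a per-pixel linear map, which is why Inductor lowers it to extern_kernels.convolution plus the small bias-add kernel above. A sketch of that equivalence (our construction):

import torch
import torch.nn as nn

conv = nn.Conv2d(4, 4, kernel_size=1)
x = torch.rand(4, 4, 4, 4)
ref = conv(x)
w = conv.weight.squeeze(-1).squeeze(-1)          # (out_channels, in_channels)
lin = torch.einsum('oi,bihw->bohw', w, x) + conv.bias.view(1, -1, 1, 1)
assert torch.allclose(ref, lin, atol=1e-6)       # 1x1 conv == per-pixel linear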
SelfAttentionPooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/xj/cxjhy4b2emwxu6foyq65uamx25utrr6yabi4u4clztnbed2dydcn.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_0 = async_compile.triton('triton_poi_fused__softmax_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/no/cnoe2ucpkq6j5eael53gjc3t3cs46utcvmwkmw73j6lruo6fspxe.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = (xindex // 64) x5 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + (x7), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] stream0 = get_raw_stream(0) 
triton_poi_fused__softmax_add_0.run(primals_4, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_1.run(primals_1, primals_4, buf1, buf2, buf3, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 return (buf4, primals_1, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
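For reference, a minimal sketch of driving the generated `call` directly, outside the provided benchmark harness. This is illustrative only: it assumes it runs in the module above on a CUDA device, the variable names are not part of the record, and the shapes/strides must satisfy the `assert_size_stride` guards.

import torch

# Primals order mirrors the guards in call():
# batch_rep (4,4,4,4), W.weight (1,4), W.bias (1,), att_mask (4,4,4,4).
batch_rep = torch.rand(4, 4, 4, 4, device='cuda')
weight = torch.rand(1, 4, device='cuda')
bias = torch.rand(1, device='cuda')
att_mask = torch.rand(4, 4, 4, 4, device='cuda')

# call() consumes (and clears) its argument list; the first output is the
# pooled representation, the rest are intermediates kept by the compiled graph.
outputs = call([batch_rep, weight, bias, att_mask])
utter_rep = outputs[0]
print(utter_rep.shape)  # torch.Size([4, 4, 4, 4])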
import torch
import torch.nn as nn


class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep : size (N, H)
        """
        batch_rep.shape[1]  # no-op retained from the original source; the sequence length is unused
        att_logits = self.W(batch_rep).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, 
eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_0[grid(64)](primals_4, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_1[grid(256)](primals_1, primals_4, buf1, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 return buf4, primals_1, primals_4, buf1 class SelfAttentionPoolingNew(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPoolingNew, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, input_0, input_1): primals_2 = self.W.weight primals_3 = self.W.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
AyushExel/s3prl
SelfAttentionPooling
false
1995
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super().__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep : size (N, H)
        """
        batch_rep.shape[1]  # no-op retained from the original source; the sequence length is unused
        att_logits = self.W(batch_rep).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
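A minimal cross-check sketch (illustrative; assumes a CUDA device and the SelfAttentionPooling / SelfAttentionPoolingNew classes from this record's python_code and triton_code fields). Once the two modules share weights, the eager and compiled paths should produce matching outputs; the tolerance is a loose fp32 assumption.

import torch

eager = SelfAttentionPooling(input_dim=4).cuda()
compiled = SelfAttentionPoolingNew(input_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the same W parameters

batch_rep = torch.rand(4, 4, 4, 4, device='cuda')
att_mask = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    out_eager = eager(batch_rep, att_mask)
    out_compiled = compiled(batch_rep, att_mask)

# The fused kernel recomputes the same max-shifted softmax, so the two paths
# should agree up to floating-point rounding.
assert torch.allclose(out_eager, out_compiled, atol=1e-5)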
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py # Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # instance_norm => add, rsqrt, var_mean # out_1 => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 
tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp23, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3g/c3gbhm3y6wldudvsxdmmjh5ssg2uys5qqk3dd3k7bxnuot4xhndp.py # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] # Source node to ATen node mapping: # instance_norm => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {}) triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6x/c6xlvvnj6ftmp7jka4547n3hpffcz5xr3op3wtbpv5povsb6rjue.py # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_2 => relu # out_3 => _unsafe_index_2, _unsafe_index_3 # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = 
call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/f6/cf6z47jjg5avrtu2dn7ifr4mx5pcq57zzoksapbtrbrglr3gqnxi.py # Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add] # Source node to ATen node mapping: # out_4 => convolution_1 # out_5 => add_2, repeat_2, rsqrt_1, var_mean_1 # out_6 => add_4 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = 
{correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + (r3 + (16*x0)), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 16, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 16.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) 
tmp26 = tmp20 * tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask) tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp25, xmask) tl.store(out_ptr1 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 16, 16, grid=grid(16), stream=stream0) del primals_3 buf3 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0) del primals_4 buf4 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0) del primals_5 buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((16, ), (1, ), torch.float32) buf11 = buf10; del buf10 # reuse buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original 
ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add] triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4.run(buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, grid=grid(16), stream=stream0) del primals_1 del primals_7 del primals_8 del primals_9 return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.onnx class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 4, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) 
tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tl.where(xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 16, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 16.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp25, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf6 triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2, buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) 
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5, buf8, buf3, buf4, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((16,), (1,), torch.float32) buf11 = buf10 del buf10 buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid (16)](buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 del primals_7 del primals_8 del primals_9 return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlockNew(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlockNew, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.in1.weight primals_5 = self.in1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.in2.weight primals_9 = self.in2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
Ali-ry/azureml-examples
ResidualBlock
false
1996
[ "MIT" ]
0
817ae89d2766dcafd70937a22cb3a80f100a2906
https://github.com/Ali-ry/azureml-examples/tree/817ae89d2766dcafd70937a22cb3a80f100a2906
import torch import torch.onnx class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class Model(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super().__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
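As a sanity check, a sketch (illustrative; assumes a CUDA device) comparing the eager ResidualBlock with the compiled ResidualBlockNew from this record. Both classes use the same parameter names, so the state_dict transfers directly.

import torch

torch.manual_seed(0)
eager = ResidualBlock(channels=4).cuda()
compiled = ResidualBlockNew(channels=4).cuda()
compiled.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y_eager = eager(x)
    y_compiled = compiled(x)

# Both paths run reflection-pad -> conv -> instance norm twice and add the
# input back, so shapes match and values agree within fp32 tolerance.
assert y_eager.shape == x.shape
assert torch.allclose(y_eager, y_compiled, atol=1e-4)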
AvgReadout
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/2c/c2caasuan6xkydnq2bvliamlyid6cl5fcz5kcz2mnyns45wtxqbs.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / 
tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AvgReadout(nn.Module): """ Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices .. math:: \\begin{equation} \\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)} \\end{equation} """ def __init__(self): super(AvgReadout, self).__init__() def forward(self, seq): return torch.mean(seq, 0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class AvgReadoutNew(nn.Module): """ Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices .. math:: \\begin{equation} \\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)} \\end{equation} """ def __init__(self): super(AvgReadoutNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BUPTlfq/OpenHGNN
AvgReadout
false
1,997
[ "Apache-2.0" ]
0
77041e68c33a8a42a2c187c6e42d85b81cbb25d3
https://github.com/BUPTlfq/OpenHGNN/tree/77041e68c33a8a42a2c187c6e42d85b81cbb25d3
import torch import torch.nn as nn class Model(nn.Module): """ Considering the efficiency of the method, we simply employ average pooling, computing the average of the set of embedding matrices .. math:: \\begin{equation} \\mathbf{H}=\\mathcal{Q}\\left(\\left\\{\\mathbf{H}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\frac{1}{|\\mathcal{R}|} \\sum_{r \\in \\mathcal{R}} \\mathbf{H}^{(r)} \\end{equation} """ def __init__(self): super().__init__() def forward(self, seq): return torch.mean(seq, 0) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
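Since AvgReadout is just a mean over the relation dimension, a small CPU sketch (with illustrative shapes) makes the contract concrete; the Triton kernel above is only the CUDA lowering of this same reduction.

import torch

readout = AvgReadout()      # as defined in this record's python_code
seq = torch.rand(3, 5, 8)   # |R| = 3 relations, 5 nodes, 8 dims
h = readout(seq)            # averages over dim 0 -> shape (5, 8)
assert h.shape == (5, 8)
assert torch.allclose(h, seq.mean(dim=0))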
BCEDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/z6/cz6ehk6udjuldkbvdykpkjp4ihcvsvw26c57rsotg2zyo22imkez.py # Topologically Sorted Source Nodes: [bce], Original ATen: [aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # bce => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': 
{0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/zs/czsfocxocrg5tbsgfmzmhomswsg7jp4tsfc6o6cysqu2g7ckglyt.py # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => mul_1 # sum_1 => sum_1 # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) triton_per_fused_mul_sum_1 = async_compile.triton('triton_per_fused_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + (x0), tmp7, xmask) tl.store(out_ptr1 + (x0), tmp11, xmask) tl.store(out_ptr2 + (x0), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2f/c2fvdreglrickmt4nxtlq5l7mdvbwbzqliwqtp2oyxpt7isnykti.py # Topologically Sorted Source Nodes: [bce, mul_2, mul_1, add, add_1, add_2, dice, sum_4, truediv_1, dice_1, add_3], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add, aten.div, aten.sum, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # bce => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # dice => div # dice_1 => sub_3 # mul_1 => mul_2 # mul_2 => mul_3 # sum_4 => sum_4 # truediv_1 => div_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs 
= {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1e-05), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %sub_3), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp13 = tl.load(in_out_ptr0 + (0)) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = 2.0 
tmp2 = tmp0 * tmp1 tmp3 = 1e-05 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = 0.25 tmp20 = tmp12 * tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tmp23 = tmp18 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [bce], Original ATen: [aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] triton_per_fused_mul_sum_1.run(arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf5 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [bce, mul_2, mul_1, add, add_1, add_2, dice, sum_4, truediv_1, dice_1, add_3], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.add, aten.div, aten.sum, aten.rsub] triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2.run(buf5, buf1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0) del buf1 del buf2 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
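The per-element math in triton_per_fused_binary_cross_entropy_with_logits_0 is the numerically stable decomposition of BCE-with-logits, (1 - z)*x - (min(0, x) - log1p(exp(-|x|))), which avoids evaluating sigmoid and log directly. A minimal eager sketch of that identity (plain PyTorch; the names logits/target are illustrative):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
# mirrors tmp4 - tmp11 in the kernel: tmp4 = (1 - z) * x, tmp11 = min(0, x) - log1p(exp(-|x|))
stable = (1 - target) * logits - (torch.clamp(logits, max=0.0) -
    torch.log1p(torch.exp(-logits.abs())))
assert torch.allclose(stable.mean(),
    F.binary_cross_entropy_with_logits(logits, target), atol=1e-6)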
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class BCEDiceLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, output, target):
        bce = F.binary_cross_entropy_with_logits(output, target)
        smooth = 1e-05
        output = torch.sigmoid(output)
        num = target.size(0)
        output = output.view(num, -1)
        target = target.view(num, -1)
        intersection = output * target
        dice = (2.0 * intersection.sum(1) + smooth) / (output.sum(1) +
            target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
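The soft Dice term above reduces to a per-sample ratio over the flattened tensors; a tiny hand-checkable example (values are illustrative):

import torch

p = torch.tensor([[0.9, 0.1, 0.8, 0.2]])   # sigmoid outputs, flattened per sample
t = torch.tensor([[1.0, 0.0, 1.0, 0.0]])   # binary targets
smooth = 1e-05
dice = (2.0 * (p * t).sum(1) + smooth) / (p.sum(1) + t.sum(1) + smooth)
# (2 * 1.7 + 1e-05) / (2.0 + 2.0 + 1e-05) ≈ 0.85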
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) @triton.jit def triton_per_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp14 = tl.where(xmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp11, xmask) tl.store(out_ptr2 + x0, tmp15, xmask) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp13 = tl.load(in_out_ptr0 + 0) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, 1]) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1e-05 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.sum(tmp10, 1)[:, None] tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = 0.25 tmp20 = tmp12 * tmp19 tmp21 = 1.0 tmp22 = tmp21 - tmp20 tmp23 = tmp18 + tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) triton_per_fused_mul_sum_1[grid(4)](arg1_1, arg0_1, buf1, buf2, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf5 = buf0 del buf0 triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sum_2[ grid(1)](buf5, buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf2 del buf3 return buf5, class BCEDiceLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
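A hypothetical smoke test for the compiled wrapper (assumes a CUDA device and that both classes above are in scope; not part of the generated file). The two values should match up to float32 reduction-order noise, provided the generated argument order mirrors the eager signature:

import torch

if torch.cuda.is_available():
    out = torch.rand(4, 4, 4, 4, device='cuda')
    tgt = torch.rand(4, 4, 4, 4, device='cuda')
    print(BCEDiceLoss()(out, tgt).item())     # eager reference
    print(BCEDiceLossNew()(out, tgt).item())  # Triton-backed version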
AzmHmd/RMS
BCEDiceLoss
false
1998
[ "MIT" ]
0
61d108e118d1e06de324644ebd8d92fc1b091b91
https://github.com/AzmHmd/RMS/tree/61d108e118d1e06de324644ebd8d92fc1b091b91
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class Model(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, output, target):
        bce = F.binary_cross_entropy_with_logits(output, target)
        smooth = 1e-05
        output = torch.sigmoid(output)
        num = target.size(0)
        output = output.view(num, -1)
        target = target.view(num, -1)
        intersection = output * target
        dice = (2.0 * intersection.sum(1) + smooth) / (output.sum(1) +
            target.sum(1) + smooth)
        dice = 1 - dice.sum() / num
        return 0.5 * bce + dice


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
AP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_8, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 
+ (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/22/c22avtz547ccq6m4s32gquk4cdjjs4eaicgqenyimouechn25s2u.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = (xindex // 64) x5 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + 
(16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + (x7), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [feature_BxTxH], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_5, buf8, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 
4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_8, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(buf0, primals_8, buf4, buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf5 del buf6 return (buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_6, buf8, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
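triton_poi_fused__softmax_add_1 is the first half of a two-pass softmax: it stores each row's max (out_ptr0) and its sum of shifted exponentials (out_ptr1); triton_poi_fused_mul_sum_2 then recomputes exp(x - max) / sum on the fly, fused with the attention-weighted reduction. The max-subtraction trick in plain PyTorch:

import torch

x = torch.randn(4, 4, 4, 4)
m = x.amax(dim=-1, keepdim=True)               # pass 1: row max
s = (x - m).exp().sum(dim=-1, keepdim=True)    # pass 1: row sum of exp
w = (x - m).exp() / s                          # pass 2: normalize on the fly
assert torch.allclose(w, torch.softmax(x, dim=-1), atol=1e-6)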
import torch
import torch.nn as nn


class AttentivePooling(nn.Module):
    """
    Implementation of Attentive Pooling
    """

    def __init__(self, input_dim, **kwargs):
        super(AttentivePooling, self).__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: hidden dimension
            att_mask  : additive attention-mask logits
        return:
            utter_rep : size (B, H)
            att_w     : size (B, T, 1)
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


class AP(nn.Module):
    """Attentive Pooling module incorporating an attention mask"""

    def __init__(self, out_dim, input_dim):
        super(AP, self).__init__()
        self.linear = nn.Linear(input_dim, out_dim)
        self.sap_layer = AttentivePooling(out_dim)
        self.act_fn = nn.ReLU()

    def forward(self, feature_BxTxH, att_mask_BxT):
        """
        Arguments
            feature_BxTxH - [BxTxH] acoustic features
            att_mask_BxT  - [BxT] attention-mask logits
        """
        feature_BxTxH = self.linear(feature_BxTxH)
        sap_vec, _ = self.sap_layer(feature_BxTxH, att_mask_BxT)
        return sap_vec


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'out_dim': 4, 'input_dim': 4}]
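Usage sketch for the module above with generic (B, T, H) shapes rather than the traced 4x4x4x4 example (sizes are illustrative):

import torch

B, T, H = 2, 7, 16
ap = AP(out_dim=H, input_dim=H)
feature = torch.randn(B, T, H)
att_mask = torch.zeros(B, T)   # additive logits; large negatives would mask frames out
vec = ap(feature, att_mask)
print(vec.shape)               # torch.Size([2, 16])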
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, 
eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del buf6 return buf7, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, 
primals_6, buf8, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class APNew(nn.Module): """ Attentive Pooling module incoporate attention mask""" def __init__(self, out_dim, input_dim): super(APNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.sap_layer = AttentivePooling(out_dim) self.act_fn = nn.ReLU() def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.sap_layer.W_a.weight primals_5 = self.sap_layer.W_a.bias primals_6 = self.sap_layer.W.weight primals_7 = self.sap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
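call() lowers both nn.Linear layers to extern_kernels.addmm over a (64, 4) view of the 4-D input, i.e. the standard bias + x @ W.T folding; a quick identity check in plain PyTorch:

import torch
import torch.nn.functional as F

x = torch.randn(64, 4)
W = torch.randn(4, 4)   # nn.Linear weight layout: (out_features, in_features)
b = torch.randn(4)
assert torch.allclose(torch.addmm(b, x, W.t()), F.linear(x, W, b), atol=1e-6)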
AyushExel/s3prl
AP
false
1999
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch
import torch.nn as nn


class AttentivePooling(nn.Module):
    """
    Implementation of Attentive Pooling
    """

    def __init__(self, input_dim, **kwargs):
        super().__init__()
        self.W_a = nn.Linear(input_dim, input_dim)
        self.W = nn.Linear(input_dim, 1)
        self.act_fn = nn.ReLU()
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (B, T, H), B: batch size, T: sequence length, H: hidden dimension
            att_mask  : additive attention-mask logits
        return:
            utter_rep : size (B, H)
            att_w     : size (B, T, 1)
        """
        att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep, att_w


class Model(nn.Module):
    """Attentive Pooling module incorporating an attention mask"""

    def __init__(self, out_dim, input_dim):
        super().__init__()
        self.linear = nn.Linear(input_dim, out_dim)
        self.sap_layer = AttentivePooling(out_dim)
        self.act_fn = nn.ReLU()

    def forward(self, feature_BxTxH, att_mask_BxT):
        """
        Arguments
            feature_BxTxH - [BxTxH] acoustic features
            att_mask_BxT  - [BxT] attention-mask logits
        """
        feature_BxTxH = self.linear(feature_BxTxH)
        sap_vec, _ = self.sap_layer(feature_BxTxH, att_mask_BxT)
        return sap_vec


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
SAP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/dn/cdnhr6ixjduuhci57kobqjnehjrl22mcyjqzuuhvtxxshy437diy.py # Topologically Sorted Source Nodes: [feature], Original ATen: [aten.tanh] # Source node to ATen node mapping: # feature => tanh # Graph fragment: # %tanh : [num_users=3] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # 
Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 
- tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/22/c22avtz547ccq6m4s32gquk4cdjjs4eaicgqenyimouechn25s2u.py # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # utter_rep => sum_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = (xindex // 64) x5 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = 
tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + (x7), tmp42, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [feature], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_4, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_2.run(buf0, primals_4, buf2, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0) del buf3 del buf4 return (buf5, primals_4, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
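For readers tracing the two kernels above: a minimal eager-mode sketch (our addition, with illustrative names) of the numerically stable masked softmax that triton_poi_fused__softmax_add_1 and triton_poi_fused_mul_sum_2 jointly realize. The first kernel emits the per-row max and the sum of shifted exponentials; the second normalizes and reduces.

import torch

def stable_masked_softmax(att_mask, logits):
    # att_logits_1 = att_mask + logits, matching %add in the graph fragment
    att_logits = att_mask + logits
    amax = att_logits.amax(dim=-1, keepdim=True)   # %amax, written to out_ptr0
    exp = torch.exp(att_logits - amax)             # %sub then %exp
    denom = exp.sum(dim=-1, keepdim=True)          # %sum_1, written to out_ptr1
    return exp / denom                             # normalization happens downstream in the mul/sum kernel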
import torch
import torch.nn as nn


class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep : size (N, H)
        """
        att_logits = self.W(batch_rep).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep


class SAP(nn.Module):
    """Self Attention Pooling module that incorporates an attention mask."""

    def __init__(self, out_dim):
        super(SAP, self).__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """
        Arguments
            feature  - [BxTxD] acoustic feature
            att_mask - [BxTx1] attention mask logits
        """
        feature = self.act_fn(feature)
        sap_vec = self.sap_layer(feature, att_mask)
        return sap_vec


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'out_dim': 4}]
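A hypothetical harness-style usage sketch (our addition) for the module above; note that this entry's get_init_inputs() returns positional args and kwargs separately.

args, kwargs = get_init_inputs()      # ([], {'out_dim': 4})
model = SAP(*args, **kwargs)
feature, att_mask = get_inputs()      # two (4, 4, 4, 4) tensors
utter_rep = model(feature, att_mask)
# With these toy 4-D inputs, att_w broadcasts to 5-D, so the pooled result
# is torch.Size([4, 4, 4, 4]) rather than the (N, H) of the 3-D case.
print(utter_rep.shape)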
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, 
eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_4, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_4, buf2, buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 return buf5, primals_4, buf0, buf2 class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPooling, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (N, T, 1) return: utter_rep: size (N, H) """ batch_rep.shape[1] att_logits = self.W(batch_rep).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep class SAPNew(nn.Module): """ Self Attention Pooling module incoporate attention mask""" def __init__(self, out_dim): super(SAPNew, self).__init__() self.act_fn = nn.Tanh() self.sap_layer = SelfAttentionPooling(out_dim) def forward(self, input_0, input_1): primals_2 
= self.sap_layer.W.weight primals_3 = self.sap_layer.W.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
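A hedged parity check (our addition; assumes a CUDA device) that the Triton-backed SAPNew agrees with the eager SAP once both share the same Linear weights:

if torch.cuda.is_available():
    eager, fused = SAP(4).cuda(), SAPNew(4).cuda()
    # copy the attention projection so the two modules are comparable
    fused.sap_layer.W.load_state_dict(eager.sap_layer.W.state_dict())
    feature = torch.rand(4, 4, 4, 4, device='cuda')
    att_mask = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(feature, att_mask),
                          fused(feature, att_mask), atol=1e-5)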
AyushExel/s3prl
SAP
false
2000
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch
import torch.nn as nn


class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super().__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension
        attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep : size (N, H)
        """
        att_logits = self.W(batch_rep).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep


class Model(nn.Module):
    """Self Attention Pooling module that incorporates an attention mask."""

    def __init__(self, out_dim):
        super().__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """
        Arguments
            feature  - [BxTxD] acoustic feature
            att_mask - [BxTx1] attention mask logits
        """
        feature = self.act_fn(feature)
        sap_vec = self.sap_layer(feature, att_mask)
        return sap_vec


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
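Construction differs slightly from the SAP entry above: this variant's get_init_inputs() returns a flat positional list, so a usage sketch (our addition) looks like:

model = Model(*get_init_inputs())     # Model(4)
feature, att_mask = get_inputs()
sap_vec = model(feature, att_mask)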
Mish
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/nt/cnt242zndfm3n4oylofk2hfby6qwsvn3dgogtgqskjur3zjntaph.py # Topologically Sorted Source Nodes: [softplus, tanh, mul], Original ATen: [aten.softplus, aten.tanh, aten.mul] # Source node to ATen node mapping: # mul => mul # softplus => exp, gt, log1p, where # tanh => tanh # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %tanh), kwargs = {}) triton_poi_fused_mul_softplus_tanh_0 = async_compile.triton('triton_poi_fused_mul_softplus_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_softplus_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, tanh, mul], Original ATen: [aten.softplus, aten.tanh, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_softplus_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
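A minimal eager sketch (our addition) of the stabilized softplus branch the fused kernel takes (tmp1..tmp5): above the threshold 20.0 it falls back to the identity, since there log1p(exp(x)) is already indistinguishable from x and exp(x) would soon overflow in float32; this matches torch.nn.functional.softplus's default threshold.

import torch

def mish_reference(x, threshold=20.0):
    # where x > threshold, softplus(x) ~= x, so skip the transcendental path
    sp = torch.where(x > threshold, x, torch.log1p(torch.exp(x)))
    return x * torch.tanh(sp)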
import torch
import torch.nn as nn
import torch.nn.functional as F


class Mish(nn.Module):

    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
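A quick sanity check (our addition): F.mish, available in PyTorch >= 1.9, computes the same x * tanh(softplus(x)), so the hand-rolled module should match it.

x = torch.randn(4, 4, 4, 4)
assert torch.allclose(Mish()(x), F.mish(x), atol=1e-6)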
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_softplus_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = libdevice.tanh(tmp5) tmp7 = tmp0 * tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_softplus_tanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MishNew(nn.Module): def __init__(self): super(MishNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
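A hedged parity sketch (our addition; assumes a CUDA device): the fused kernel and the eager definition should agree to float32 tolerance.

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    expected = x * torch.tanh(torch.nn.functional.softplus(x))
    assert torch.allclose(MishNew()(x), expected, atol=1e-6)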
BDeMo/yolov4-pytorch
Mish
false
2001
[ "MIT" ]
0
2434afc88d0890bdb19c5655bb7c577d22bf18d3
https://github.com/BDeMo/yolov4-pytorch/tree/2434afc88d0890bdb19c5655bb7c577d22bf18d3
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x * torch.tanh(F.softplus(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
ResidualAttentionBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ret => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ret => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ah/cahpqo3o7hv3q647n5lretlqvfljlubj4ic7gscxws4yvkm5jzff.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul_2 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_9, 1.0), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/id/cidhx2qv2kci37tvy6z4ewaqfmq7whhaejd6kgfwwkpiinbays7z.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_6, 1.0), kwargs = {}) 
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/5l/c5lr3fbju3rzhzjnfdhv5evmxwg6sfeq26etlwygdbmjwzziqmty.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_14, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_14, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__safe_softmax_4 = async_compile.triton('triton_poi_fused__safe_softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__safe_softmax_4', 'mutated_arg_names': [], 'no_x_dim': 
False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ue/cue46mvjwzenos4q7lzsgaczgcooiukadngwwlhdoblv6ugei3uv.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax] # Source node to ATen node mapping: # multi_head_attention_forward => any_1, div, eq, full_default, logical_not, logical_not_1, sum_1, where # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_14, -inf), kwargs = {}) # %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq,), kwargs = {}) # %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not, -1, True), kwargs = {}) # %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_1,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_1, %full_default, %div), kwargs = {}) triton_poi_fused__safe_softmax_5 = async_compile.triton('triton_poi_fused__safe_softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__safe_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (x2), xmask) tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = float("-inf") tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = (tmp4 != 0) tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = (tmp9 != 0) tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = (tmp15 != 0) tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = (tmp21 != 0) tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + (x2), tmp35, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/bu/cbuwmu2mfp5lph35cxxxg6q2iiyw5l7wwbgjvpc2uau2jqndueez.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/vd/cvdxk4eaksba3zsprpzfb3vx7t7nnrtdy3faz6dzcvumwkab3o3e.py # Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # ret_1 => var_mean_1 # x => add_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), 
xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ee/ceevx2e7wi6j4h2hf3scvp3at64mcchaq7w4yqwjgj6s77ewvx2h.py # Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # ret_1 => add_3, add_4, mul_4, mul_5, rsqrt_1, sub_2 # x => add_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_8), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_9), kwargs = {}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/s4/cs4y6xlhpgdaidbv4fzi42ren6zhpqqveelpjlrjwzf7b52imcf5.py # Topologically Sorted Source Nodes: [mul, sigmoid, input_2], Original ATen: [aten.mul, aten.sigmoid] # Source node to ATen node mapping: # input_2 => mul_7 # mul => mul_6 # sigmoid => sigmoid # Graph fragment: # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, 1.702), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_4, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_9 = async_compile.triton('triton_poi_fused_mul_sigmoid_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 
(x0), xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/g3/cg3qezdtknb7gkpzhrsk3kwygoxnfplxs7kvhkhixedr43ssvdqx.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add] # Source node to ATen node mapping: # x => add_2 # x_1 => add_5 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {}) # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16, 
), (1, )) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, grid=grid(16), stream=stream0) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf5) buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0) buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf7, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0), 0), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8) buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax] triton_poi_fused__safe_softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax] triton_poi_fused__safe_softmax_5.run(buf8, buf9, buf10, 64, grid=grid(64), stream=stream0) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf11, buf12, 4, 4, grid=grid(4, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] 
extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_7 buf14 = buf1; del buf1 # reuse buf15 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf13, buf14, buf15, 4, grid=grid(4), stream=stream0) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, ret_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_1, buf13, buf14, buf15, primals_8, primals_9, buf16, 16, grid=grid(16), stream=stream0) del buf14 del buf15 del primals_9 buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf16, reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_11 buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [mul, sigmoid, input_2], Original ATen: [aten.mul, aten.sigmoid] triton_poi_fused_mul_sigmoid_9.run(buf17, buf18, 64, grid=grid(64), stream=stream0) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf19) buf20 = buf19; del buf19 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf20, primals_1, buf13, primals_13, 16, grid=grid(16), stream=stream0) del primals_13 return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
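# Editor's sketch (not part of the generated file; assumes a CUDA device and
# that this file's call() is in scope): the harness above times call() on
# rand_strided inputs. The snippet below invokes call() once with tensors
# shaped exactly as the assert_size_stride guards expect; the role of each
# primal is inferred from how call() consumes it.
def smoke_test_call():
    import torch
    dev = 'cuda:0'
    primals = [
        torch.rand(4, 4, device=dev),   # primals_1: block input x (fed to ln_1)
        torch.rand(4, device=dev),      # primals_2: ln_1 weight
        torch.rand(4, device=dev),      # primals_3: ln_1 bias
        torch.rand(12, 4, device=dev),  # primals_4: attn in_proj weight
        torch.rand(12, device=dev),     # primals_5: attn in_proj bias
        torch.rand(4, 4, device=dev),   # primals_6: attn out_proj weight
        torch.rand(4, device=dev),      # primals_7: attn out_proj bias
        torch.rand(4, device=dev),      # primals_8: ln_2 weight
        torch.rand(4, device=dev),      # primals_9: ln_2 bias
        torch.rand(16, 4, device=dev),  # primals_10: mlp.c_fc weight
        torch.rand(16, device=dev),     # primals_11: mlp.c_fc bias
        torch.rand(4, 16, device=dev),  # primals_12: mlp.c_proj weight
        torch.rand(4, device=dev),      # primals_13: mlp.c_proj bias
    ]
    out = call(primals)[0]  # call() clears the list it is given
    assert out.shape == (4, 4)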
import torch from collections import OrderedDict from torch import nn class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: 'torch.Tensor'): orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class QuickGELU(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) class ResidualAttentionBlock(nn.Module): def __init__(self, d_model: 'int', n_head: 'int', attn_mask: 'torch.Tensor'=None): super().__init__() self.attn = nn.MultiheadAttention(d_model, n_head) self.ln_1 = LayerNorm(d_model) self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear( d_model * 4, d_model))])) self.ln_2 = LayerNorm(d_model) self.attn_mask = attn_mask def attention(self, x: 'torch.Tensor'): self.attn_mask = self.attn_mask if self.attn_mask is not None else None return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask )[0] def forward(self, x: 'torch.Tensor'): x = x + self.attention(self.ln_1(x)) x = x + self.mlp(self.ln_2(x)) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'n_head': 4}]
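# QuickGELU above approximates the exact erf-based GELU as
# x * sigmoid(1.702 * x); 1.702 is the standard sigmoid-approximation
# constant. A quick numerical check of that approximation (illustrative
# addition, all names are standard PyTorch):
import torch
import torch.nn.functional as F

_x = torch.linspace(-4, 4, steps=1001)
_quick = _x * torch.sigmoid(1.702 * _x)  # QuickGELU, as defined above
_exact = F.gelu(_x)                      # exact erf-based GELU
print((_quick - _exact).abs().max())     # on the order of 1e-2 at worst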
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from collections import OrderedDict from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__safe_softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__safe_softmax_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (16, 4), (4, 1)) assert_size_stride(primals_11, (16,), (1,)) assert_size_stride(primals_12, (4, 16), (16, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha= 1, beta=1, out=buf5) buf6 = reinterpret_tensor(buf3, (1, 4, 4, 1), (16, 1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf4, (1, 4, 1, 4), (16, 1, 16, 4), 0) del buf4 triton_poi_fused_mul_3[grid(16)](buf7, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 1), (1, 4, 0), 0 ), reinterpret_tensor(buf7, (4, 1, 4), (1, 0, 4), 0), out=buf8) buf9 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_4[grid(64)](buf8, buf9, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__safe_softmax_5[grid(64)](buf8, buf9, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 1, 4, 1), (4, 1, 1, 4), torch.float32) triton_poi_fused_clone_6[grid(4, 4)](buf11, buf12, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (4, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_7, reinterpret_tensor(buf12, (4, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_7 buf14 = buf1 del buf1 buf15 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_7[grid(4)](primals_1, buf13, buf14, buf15, 4, XBLOCK=4, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_1, buf13, buf14, buf15, primals_8, primals_9, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf14 del buf15 del primals_9 buf17 = reinterpret_tensor(buf9, (4, 16), (16, 1), 0) del buf9 extern_kernels.addmm(primals_11, buf16, reinterpret_tensor( primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_11 buf18 = reinterpret_tensor(buf8, (4, 16), (16, 1), 0) del buf8 triton_poi_fused_mul_sigmoid_9[grid(64)](buf17, buf18, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf18, reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf19) buf20 = buf19 del buf19 triton_poi_fused_add_10[grid(16)](buf20, primals_1, buf13, primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_13 return (buf20, primals_1, primals_8, buf2, buf10, reinterpret_tensor( buf12, (4, 4), (4, 1), 0), buf13, buf16, buf17, buf18, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4 ), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 4, 4), 0), reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 16), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: 'torch.Tensor'): orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class QuickGELU(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) class ResidualAttentionBlockNew(nn.Module): def __init__(self, d_model: 'int', n_head: 'int', attn_mask: 'torch.Tensor'=None): super().__init__() self.attn = nn.MultiheadAttention(d_model, n_head) self.ln_1 = LayerNorm(d_model) self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear( d_model * 4, d_model))])) self.ln_2 = LayerNorm(d_model) self.attn_mask = attn_mask def attention(self, x: 'torch.Tensor'): self.attn_mask = self.attn_mask if self.attn_mask is not None else None return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask )[0] def forward(self, input_0): primals_4 = self.attn.in_proj_weight primals_5 = self.attn.in_proj_bias primals_1 = self.attn.out_proj.weight primals_2 = self.attn.out_proj.bias primals_3 = self.ln_1.weight primals_7 = self.ln_1.bias primals_10 = self.mlp.c_fc.weight primals_11 = self.mlp.c_fc.bias primals_12 = self.mlp.c_proj.weight primals_8 = self.mlp.c_proj.bias primals_9 = self.ln_2.weight primals_13 = self.ln_2.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
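# Caveat (editor's sketch, not part of the generated file): the primals
# assignments in ResidualAttentionBlockNew.forward above do not line up with
# how call() consumes its arguments. call() layer-norms primals_1 against the
# primals_2/primals_3 affine pair (ln_1), uses primals_6/primals_7 as the
# attention out-projection, primals_8/primals_9 as the ln_2 affine pair, and
# primals_13 as the c_proj bias; so primals_1 must be the block input, not
# attn.out_proj.weight. A wrapper wired to call()'s actual consumption order:
class ResidualAttentionBlockFixed(ResidualAttentionBlockNew):
    def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.ln_1.weight
        primals_3 = self.ln_1.bias
        primals_4 = self.attn.in_proj_weight
        primals_5 = self.attn.in_proj_bias
        primals_6 = self.attn.out_proj.weight
        primals_7 = self.attn.out_proj.bias
        primals_8 = self.ln_2.weight
        primals_9 = self.ln_2.bias
        primals_10 = self.mlp.c_fc.weight
        primals_11 = self.mlp.c_fc.bias
        primals_12 = self.mlp.c_proj.weight
        primals_13 = self.mlp.c_proj.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
                       primals_5, primals_6, primals_7, primals_8,
                       primals_9, primals_10, primals_11, primals_12,
                       primals_13])
        return output[0]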
Artanic30/RentalPrediction
ResidualAttentionBlock
false
2002
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
import torch from collections import OrderedDict from torch import nn class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: 'torch.Tensor'): orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class QuickGELU(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) class Model(nn.Module): def __init__(self, d_model: 'int', n_head: 'int', attn_mask: 'torch.Tensor'=None): super().__init__() self.attn = nn.MultiheadAttention(d_model, n_head) self.ln_1 = LayerNorm(d_model) self.mlp = nn.Sequential(OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)), ('gelu', QuickGELU()), ('c_proj', nn.Linear( d_model * 4, d_model))])) self.ln_2 = LayerNorm(d_model) self.attn_mask = attn_mask def attention(self, x: 'torch.Tensor'): self.attn_mask = self.attn_mask if self.attn_mask is not None else None return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask )[0] def forward(self, x: 'torch.Tensor'): x = x + self.attention(self.ln_1(x)) x = x + self.mlp(self.ln_2(x)) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4, 4]
MultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # q_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_7/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/um/cumdt56px4jhgi4x7ers5m2jlyr4stfdyfhyb47o43khr5qzdg6f.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ja/cjapoylwc442ttbiaridhuezkvo2e7durqwl2lytr7aq6togjqw3.py # Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # attn_weights_4 => div_1 # sum_1 => sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_17, [1]), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {}) triton_poi_fused_div_sum_4 = async_compile.triton('triton_poi_fused_div_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf7, buf8, 4, 16, grid=grid(4, 16), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_7 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div] triton_poi_fused_div_sum_4.run(buf6, buf10, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), buf10, reinterpret_tensor(primals_1, (16, 4), 
(4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
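# The softmax over attention scores is lowered into two pointwise passes:
# triton_poi_fused__softmax_1 subtracts the per-row max and exponentiates
# (the numerically stable step), and triton_poi_fused__softmax_2 divides by
# the per-row sum. The same decomposition in plain PyTorch, for reference
# (illustrative addition, not part of the generated file):
import torch

_scores = torch.randn(16, 4, 4)
_e = (_scores - _scores.amax(dim=-1, keepdim=True)).exp()  # pass 1
_probs = _e / _e.sum(dim=-1, keepdim=True)                 # pass 2
assert torch.allclose(_probs, _scores.softmax(dim=-1))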
import torch import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float('-inf')).type_as(t) def _get_full_incremental_state_key(module_instance, key): module_name = module_instance.__class__.__name__ if not hasattr(module_instance, '_fairseq_instance_id'): INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[ module_name] return '{}.{}.{}'.format(module_name, module_instance. _fairseq_instance_id, key) def get_incremental_state(module, incremental_state, key): """Helper for getting incremental state for an nn.Module.""" full_key = _get_full_incremental_state_key(module, key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state(module, incremental_state, key, value): """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = _get_full_incremental_state_key(module, key) incremental_state[full_key] = value class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self._mask = None self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) def forward(self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. 
""" qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr() kv_same = key.data_ptr() == value.data_ptr() tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if 'prev_key' in saved_state: if static_kv: assert kv_same and not qkv_same key = value = None else: saved_state = None if qkv_same: q, k, v = self.in_proj_qkv(query) elif kv_same: q = self.in_proj_q(query) if key is None: assert value is None k = v = q.new(0) else: k, v = self.in_proj_kv(key) else: q = self.in_proj_q(query) k = self.in_proj_k(key) v = self.in_proj_v(value) q *= self.scaling if saved_state is not None: if 'prev_key' in saved_state: k = torch.cat((saved_state['prev_key'], k), dim=0) if 'prev_value' in saved_state: v = torch.cat((saved_state['prev_value'], v), dim=0) saved_state['prev_key'] = k saved_state['prev_value'] = v self._set_input_buffer(incremental_state, saved_state) src_len = k.size(0) if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) k = k.contiguous().view(src_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) v = v.contiguous().view(src_len, bsz * self.num_heads, self.head_dim ).transpose(0, 1) attn_weights = torch.bmm(q, k.transpose(1, 2)) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if mask_future_timesteps and incremental_state is None: assert query.size() == key.size( ), 'mask_future_timesteps only applies to self-attention' attn_weights += self.buffered_mask(attn_weights).unsqueeze(0) if key_padding_mask is not None: attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.float().masked_fill(key_padding_mask .unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as( attn_weights) attn_weights = F.dropout(attn_weights, p=self.dropout, training= self.training) attn = torch.bmm(attn_weights, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self. 
head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.sum(dim=1) / self.num_heads return attn, attn_weights def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query): return self._in_proj(query, end=self.embed_dim) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=None, end=None): weight = self.in_proj_weight bias = self.in_proj_bias if end is not None: weight = weight[:end, :] if bias is not None: bias = bias[:end] if start is not None: weight = weight[start:, :] if bias is not None: bias = bias[start:] return F.linear(input, weight, bias) def buffered_mask(self, tensor): dim = tensor.size(-1) if self._mask is None: self._mask = torch.triu(fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._mask.size(0) < dim: self._mask = torch.triu(fill_with_neg_inf(self._mask.resize_( dim, dim)), 1) return self._mask[:dim, :dim] def reorder_incremental_state(self, incremental_state, new_order): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer[k] = input_buffer[k].index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return get_incremental_state(self, incremental_state, 'attn_state' ) or {} def _set_input_buffer(self, incremental_state, buffer): set_incremental_state(self, incremental_state, 'attn_state', buffer) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'embed_dim': 4, 'num_heads': 4}]
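# Caveat: _get_full_incremental_state_key above reads a module-level
# INCREMENTAL_STATE_INSTANCE_ID that this snippet never defines; in upstream
# fairseq it is a per-class counter (a defaultdict of ints). A minimal
# definition plus a usage sketch of the incremental-state helpers (editor's
# addition, for illustration only):
from collections import defaultdict

INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)  # missing from the snippet

def _demo_incremental_state():
    import torch
    attn = MultiheadAttention(embed_dim=4, num_heads=4)
    state = {}
    set_incremental_state(attn, state, 'attn_state',
                          {'prev_key': torch.zeros(1, 4, 1)})
    buf = get_incremental_state(attn, state, 'attn_state')
    # Stored under a key like 'MultiheadAttention.<instance_id>.attn_state'
    assert 'prev_key' in buf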
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_7, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_7 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_div_sum_4[grid(64)](buf6, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) return reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0 ), buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_6, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0 ), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0) def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float('-inf')).type_as(t) def _get_full_incremental_state_key(module_instance, key): module_name = module_instance.__class__.__name__ if not hasattr(module_instance, '_fairseq_instance_id'): INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[ module_name] return '{}.{}.{}'.format(module_name, module_instance. _fairseq_instance_id, key) def get_incremental_state(module, incremental_state, key): """Helper for getting incremental state for an nn.Module.""" full_key = _get_full_incremental_state_key(module, key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state(module, incremental_state, key, value): """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = _get_full_incremental_state_key(module, key) incremental_state[full_key] = value class MultiheadAttentionNew(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self._mask = None self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) def in_proj_qkv(self, query): return self._in_proj(query).chunk(3, dim=-1) def in_proj_kv(self, key): return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1) def in_proj_q(self, query): return self._in_proj(query, end=self.embed_dim) def in_proj_k(self, key): return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim) def in_proj_v(self, value): return self._in_proj(value, start=2 * self.embed_dim) def _in_proj(self, input, start=None, end=None): weight = self.in_proj_weight bias = self.in_proj_bias if end is not None: weight = weight[:end, :] if bias is not None: bias = bias[:end] if start is not None: weight = weight[start:, :] if bias is not None: bias = bias[start:] return F.linear(input, weight, bias) def buffered_mask(self, tensor): dim = tensor.size(-1) if self._mask is None: self._mask = torch.triu(fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._mask.size(0) < dim: self._mask = 
torch.triu(fill_with_neg_inf(self._mask.resize_( dim, dim)), 1) return self._mask[:dim, :dim] def reorder_incremental_state(self, incremental_state, new_order): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer[k] = input_buffer[k].index_select(1, new_order) self._set_input_buffer(incremental_state, input_buffer) def _get_input_buffer(self, incremental_state): return get_incremental_state(self, incremental_state, 'attn_state' ) or {} def _set_input_buffer(self, incremental_state, buffer): set_incremental_state(self, incremental_state, 'attn_state', buffer) def forward(self, input_0, input_1, input_2): primals_4 = self.in_proj_weight primals_5 = self.in_proj_bias primals_6 = self.out_proj.weight primals_7 = self.out_proj.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
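Note: the incremental-state helpers above reference a module-level counter, INCREMENTAL_STATE_INSTANCE_ID, that is never defined anywhere in this snippet. A minimal sketch of the missing global, assuming the usual fairseq convention of a per-class instance counter:

from collections import defaultdict

# Maps a module class name to the number of instances created so far;
# _get_full_incremental_state_key increments this to build unique state keys.
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)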
ArkanDH/Team5-Inverse-Cooking-Stuff
MultiheadAttention
false
2003
[ "MIT" ]
0
ec224918b25fb7a04aa09995e4d11804448df7dd
https://github.com/ArkanDH/Team5-Inverse-Cooking-Stuff/tree/ec224918b25fb7a04aa09995e4d11804448df7dd
import torch import torch.nn as nn from torch.nn import Parameter import torch.nn.functional as F def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float('-inf')).type_as(t) def _get_full_incremental_state_key(module_instance, key): module_name = module_instance.__class__.__name__ if not hasattr(module_instance, '_fairseq_instance_id'): INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[ module_name] return '{}.{}.{}'.format(module_name, module_instance. _fairseq_instance_id, key) def get_incremental_state(module, incremental_state, key): """Helper for getting incremental state for an nn.Module.""" full_key = _get_full_incremental_state_key(module, key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state(module, incremental_state, key, value): """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = _get_full_incremental_state_key(module, key) incremental_state[full_key] = value class Model(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' self.scaling = self.head_dim ** -0.5 self._mask = None self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim)) if bias: self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.in_proj_weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.in_proj_bias is not None: nn.init.constant_(self.in_proj_bias, 0.0) nn.init.constant_(self.out_proj.bias, 0.0) def forward(self, query, key, value, mask_future_timesteps=False, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False): """Input shape: Time x Batch x Channel Self-attention can be implemented by passing in the same arguments for query, key and value. Future timesteps can be masked with the `mask_future_timesteps` argument. Padding elements can be excluded from the key by passing a binary ByteTensor (`key_padding_mask`) with shape: batch x src_len, where padding elements are indicated by 1s. """ qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr() kv_same = key.data_ptr() == value.data_ptr() tgt_len, bsz, embed_dim = query.size() assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] assert key.size() == value.size() if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if 'prev_key' in saved_state: if static_kv: assert kv_same and not qkv_same key = value = None else: saved_state = None if qkv_same: q, k, v = self.in_proj_qkv(query) elif kv_same: q = self.in_proj_q(query) if key is None: assert value is None k = v = q.new(0) else: k, v = self.in_proj_kv(key) else: q # ... truncated (>4000 chars) for memory efficiency
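The q/k/v projections above slice a single packed (3 * embed_dim, embed_dim) weight. A small standalone check (illustrative shapes, not taken from the snippet) that the start/end slicing in _in_proj agrees with the chunked variant used by in_proj_qkv:

import torch
import torch.nn.functional as F

E = 4
x = torch.rand(2, E)
w = torch.rand(3 * E, E)
b = torch.rand(3 * E)
# Row slices of the packed weight, as _in_proj does with start/end:
q = F.linear(x, w[:E], b[:E])
k = F.linear(x, w[E:2 * E], b[E:2 * E])
v = F.linear(x, w[2 * E:], b[2 * E:])
# One projection followed by a chunk over the output dim, as in_proj_qkv does:
q2, k2, v2 = F.linear(x, w, b).chunk(3, dim=-1)
assert torch.allclose(q, q2) and torch.allclose(k, k2) and torch.allclose(v, v2)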
Fire
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/x2/cx2xvnzmnt63cxmn2w5numnmu3nfvs3w44zae4ys3rvoczt66fyt.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_2], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 8, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp12 & xmask, other=0.0) tmp16 = tl.load(in_ptr3 + ((-4) + x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + (x3), tmp21, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/35/c35vxy5oullorpkglb324ij376h6o5mbjsqjoykh7yiskiyinasn.py # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = 
extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf2, primals_5, buf3, primals_7, buf4, 512, grid=grid(512), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf3, primals_7, buf5, 256, grid=grid(256), stream=stream0) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_2.run(buf2, primals_5, buf6, 256, grid=grid(256), stream=stream0) del buf2 del primals_5 return (buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
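The two threshold_backward kernels above contribute nothing to the forward output; they only store the boolean mask relu(x) <= 0 that autograd later uses to zero gradients through the ReLU. An eager sketch of that backward rule (tensors here are illustrative):

import torch

y = torch.randn(4, 4, 4, 4)
mask = torch.relu(y) <= 0                  # tmp6 in the kernel
grad_out = torch.randn_like(y)
grad_in = grad_out.masked_fill(mask, 0.0)  # ReLU backward: zero where act <= 0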
import torch import torch.onnx import torch import torch.nn as nn class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x))], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4, 'expand3x3_planes': 4}]
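A minimal sketch of how the get_init_inputs/get_inputs helpers above would typically drive the module (assumes a CUDA device; the printed shape follows from concatenating the two 4-channel expand branches):

import torch

args, kwargs = get_init_inputs()            # [], {'inplanes': 4, ...}
model = Fire(*args, **kwargs).cuda()
inputs = [t.cuda() for t in get_inputs()]   # one (4, 4, 4, 4) tensor
out = model(*inputs)
print(out.shape)                            # torch.Size([4, 8, 4, 4])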
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.onnx import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp12 & xmask, other=0.0) tmp16 = tl.load(in_ptr3 + (-4 + x1), tmp12 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, 
XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf2, primals_5, buf3, primals_7, buf4, 512, XBLOCK=256, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf3, primals_7, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_2[grid(256)](buf2, primals_5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_5 return buf4, primals_1, primals_3, primals_4, primals_6, buf1, buf5, buf6 class FireNew(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(FireNew, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, input_0): primals_1 = self.squeeze.weight primals_2 = self.squeeze.bias primals_4 = self.expand1x1.weight primals_5 = self.expand1x1.bias primals_6 = self.expand3x3.weight primals_7 = self.expand3x3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
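A hypothetical parity check (not part of the generated file, and assuming a CUDA device) comparing the Inductor wrapper FireNew against the eager Fire defined earlier; both expose the same parameter names, so a state_dict copy lines them up:

import torch

eager = Fire(4, 4, 4, 4).cuda()
compiled = FireNew(4, 4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x))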
AndySer37/pytorch-ssd-mobile
Fire
false
2004
[ "MIT" ]
0
ec4935940ffa374edc1e9a7009c279e727e548d7
https://github.com/AndySer37/pytorch-ssd-mobile/tree/ec4935940ffa374edc1e9a7009c279e727e548d7
import torch import torch.onnx import torch import torch.nn as nn class Model(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super().__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x))], 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'squeeze_planes': 4, 'expand1x1_planes': 4, 'expand3x3_planes': 4}]
CNormalized_Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ef/cefxo2fn2nc6kz3xqsftatxgyvfkkm7bbs7raicfjscwteq4iixh.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, div], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div] # Source node to ATen node mapping: # div => div # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [0]), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {}) triton_poi_fused_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_div_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, div], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_pow_sqrt_sum_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch as th class CNormalized_Linear(th.nn.Module): """Linear layer with column-wise normalized input matrix.""" def __init__(self, in_features, out_features, bias=False): """Initialize the layer.""" super(CNormalized_Linear, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = th.nn.Parameter(th.Tensor(out_features, in_features)) if bias: self.bias = th.nn.Parameter(th.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): """Reset the parameters.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input): """Feed-forward through the network.""" return th.nn.functional.linear(input, self.weight.div(self.weight. pow(2).sum(0).sqrt())) def __repr__(self): """For print purposes.""" return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
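The forward above rescales the weight by its column-wise L2 norm, so every column of the effective weight has unit norm. A quick standalone verification of that identity:

import torch

w = torch.randn(4, 4)
w_norm = w.div(w.pow(2).sum(0).sqrt())  # the expression used in forward
print(w_norm.pow(2).sum(0))             # ~tensor([1., 1., 1., 1.])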
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch as th assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_pow_sqrt_sum_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf1) del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class CNormalized_LinearNew(th.nn.Module): """Linear layer with column-wise normalized input matrix.""" def __init__(self, in_features, out_features, bias=False): """Initialize the layer.""" super(CNormalized_LinearNew, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = th.nn.Parameter(th.Tensor(out_features, in_features)) if bias: self.bias = th.nn.Parameter(th.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): """Reset the parameters.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def __repr__(self): """For print purposes.""" return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None) + ')' def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
BadrYoubiIdrissi/CausalDiscoveryToolbox
CNormalized_Linear
false
2005
[ "MIT" ]
0
1e729d002a64ea1942caecd21b9dc8cc217ea0e2
https://github.com/BadrYoubiIdrissi/CausalDiscoveryToolbox/tree/1e729d002a64ea1942caecd21b9dc8cc217ea0e2
import math import torch import torch as th class Model(th.nn.Module): """Linear layer with column-wise normalized input matrix.""" def __init__(self, in_features, out_features, bias=False): """Initialize the layer.""" super().__init__() self.in_features = in_features self.out_features = out_features self.weight = th.nn.Parameter(th.Tensor(out_features, in_features)) if bias: self.bias = th.nn.Parameter(th.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): """Reset the parameters.""" stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input): """Feed-forward through the network.""" return th.nn.functional.linear(input, self.weight.div(self.weight. pow(2).sum(0).sqrt())) def __repr__(self): """For print purposes.""" return self.__class__.__name__ + '(' + 'in_features=' + str(self. in_features) + ', out_features=' + str(self.out_features ) + ', bias=' + str(self.bias is not None) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
ASP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3v/c3vbbnaoh2ala54xhjzwr7f44xb5tmg7hvdni6ytelrhdlekfg4j.py # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_logits_1 => add # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_8, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_1 = async_compile.triton('triton_poi_fused__softmax_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 
+ (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ec/cecsiupxfmmb3m5bbivf5bciiq2k5ishu3qab2b2u6oieray7t5b.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %view_1), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x5 = (xindex // 4) % 64 x7 = (xindex // 16) x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x7), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x7), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + (x8), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x9), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/pk/cpky4ikacz2hgolxoehy6foq57gecnbejjobg6du4jubou3tbafn.py # Topologically Sorted Source Nodes: [mul, utter_rep, mul_2, sum_2, pow_1, sub, add_1, variance], Original 
ATen: [aten.mul, aten.sum, aten.pow, aten.sub, aten.add, aten.sqrt] # Source node to ATen node mapping: # add_1 => add_1 # mul => mul # mul_2 => mul_2 # pow_1 => pow_1 # sub => sub_1 # sum_2 => sum_3 # utter_rep => sum_2 # variance => sqrt # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %view_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 2), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %pow_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-08), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%sqrt, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_2, 2.0), kwargs = {}) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 24, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = (xindex // 64) x4 = (xindex // 4) % 16 x2 = (xindex // 16) % 4 x0 = xindex % 4 x5 = (xindex // 4) 
x8 = xindex tmp0 = tl.load(in_ptr0 + (x6), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + (64*x3)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + (16*x3)), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr5 + (x6 + (256*x3)), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + (256*x3)), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + (256*x3)), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + (256*x3)), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + (8*x5)), tmp42, xmask) tl.store(out_ptr2 + (x0 + (8*x5)), tmp58, xmask) tl.store(out_ptr3 + (x8), tmp60, xmask) tl.store(out_ptr4 + (x8), tmp61, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[feature_BxTxH], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_5, buf14, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [att_logits_1, softmax], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_1.run(primals_8, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_8, buf4, buf5, buf6, buf0, buf8, 1024, grid=grid(1024), stream=stream0) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) # alias buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) # alias buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, utter_rep, mul_2, sum_2, pow_1, sub, add_1, variance], Original ATen: [aten.mul, aten.sum, aten.pow, aten.sub, aten.add, aten.sqrt] triton_poi_fused_add_mul_pow_sqrt_sub_sum_3.run(buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, grid=grid(256), stream=stream0) del buf5 del buf6 return (buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) 
return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class ASP(nn.Module): """ Attentive Statistic Pooling module incorporating an attention mask""" def __init__(self, out_dim, input_dim): super(ASP, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, feature_BxTxH, att_mask_BxT): """ Arguments feature_BxTxH - [BxTxH] Acoustic feature with shape att_mask_BxT - [BxT] Attention Mask logits """ feature_BxTxH = self.linear(feature_BxTxH) sap_vec, att_w = self.ap_layer(feature_BxTxH, att_mask_BxT) variance = torch.sqrt(torch.sum(att_w * feature_BxTxH * feature_BxTxH, dim=1) - sap_vec ** 2 + 1e-08) statistic_pooling = torch.cat([sap_vec, variance], dim=-1) return statistic_pooling def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'out_dim': 4, 'input_dim': 4}]
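A minimal usage sketch for the eager-mode ASP record above (not part of the dataset record; it only reuses the module and the shapes from get_inputs()/get_init_inputs(), and the 4-D test tensors broadcast through the same (B, T, H) code path):

import torch

model = ASP(out_dim=4, input_dim=4)              # matches get_init_inputs()
feature, att_mask = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = model(feature, att_mask)
# cat([sap_vec, variance], dim=-1) doubles the last dim: 4 -> 8,
# matching the (4, 4, 4, 8) buf11 allocated by the compiled graph.
print(out.shape)  # torch.Size([4, 4, 4, 8])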
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex // 4 % 64 x7 = xindex // 16 x8 = xindex % 256 x9 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x7, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x7, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr4 + x8, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x9, tmp9, xmask) @triton.jit def triton_poi_fused_add_mul_pow_sqrt_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.constexpr): xnumel = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x6 = xindex % 64 x3 = xindex // 64 x4 = xindex // 4 % 16 x2 = xindex // 16 % 4 x0 = xindex % 4 x5 = xindex // 4 x8 = xindex tmp0 = tl.load(in_ptr0 + x6, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x4), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x6), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x4), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x6), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x4 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x4), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp43 = tl.load(in_ptr5 + (x6 + 256 * x3), xmask) tmp45 = tl.load(in_ptr5 + (64 + x6 + 256 * x3), xmask) tmp48 = tl.load(in_ptr5 + (128 + x6 + 256 * x3), xmask) tmp51 = tl.load(in_ptr5 + (192 + x6 + 256 * x3), xmask) tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tmp44 = tmp43 * tmp0 tmp46 = tmp45 * tmp10 tmp47 = tmp44 + tmp46 tmp49 = tmp48 * tmp21 tmp50 = tmp47 + tmp49 tmp52 = tmp51 * tmp32 tmp53 = tmp50 + tmp52 tmp54 = tmp42 * tmp42 tmp55 = tmp53 - tmp54 tmp56 = 1e-08 tmp57 = tmp55 + tmp56 tmp58 = libdevice.sqrt(tmp57) tmp59 = 2.0 tmp60 = tmp58 * tmp59 tmp61 = tmp42 * tmp59 tl.store(out_ptr0 + (x0 + 8 * x5), tmp42, xmask) tl.store(out_ptr2 + (x0 + 8 * x5), tmp58, xmask) tl.store(out_ptr3 + x8, tmp60, xmask) tl.store(out_ptr4 + x8, tmp61, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, primals_5, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_8, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(1024)](primals_8, buf4, buf5, buf6, buf0, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32 ) buf7 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 0) buf10 = reinterpret_tensor(buf11, (4, 4, 4, 4), (128, 32, 8, 1), 4) buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_sqrt_sub_sum_3[grid(256)](buf0, primals_8, buf4, buf5, buf6, buf8, buf7, buf10, buf12, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 return buf11, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, buf8, buf12, buf13, primals_6, buf14, primals_4 class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super(AttentivePooling, self).__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class ASPNew(nn.Module): """ Attentive Statistic Pooling module incorporating an attention mask""" def __init__(self, out_dim, input_dim): super(ASPNew, self).__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, input_0, input_1): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_4 = self.ap_layer.W_a.weight primals_5 = self.ap_layer.W_a.bias primals_6 = self.ap_layer.W.weight primals_7 = self.ap_layer.W.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
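A parity check between the eager ASP and the Inductor-generated ASPNew above; this is an illustrative sketch, assuming a CUDA device is available (call() allocates with empty_strided_cuda) and that both modules carry identical weights:

import torch

eager, compiled = ASP(4, 4).cuda(), ASPNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())     # same parameters in both
x = torch.rand(4, 4, 4, 4, device='cuda')
mask = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x, mask), compiled(x, mask))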
AyushExel/s3prl
ASP
false
2,006
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn class AttentivePooling(nn.Module): """ Implementation of Attentive Pooling """ def __init__(self, input_dim, **kwargs): super().__init__() self.W_a = nn.Linear(input_dim, input_dim) self.W = nn.Linear(input_dim, 1) self.act_fn = nn.ReLU() self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (B, T, H), B: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (B, T, 1) return: utter_rep: size (B, H) """ att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep, att_w class Model(nn.Module): """ Attentive Statistic Pooling module incorporating an attention mask""" def __init__(self, out_dim, input_dim): super().__init__() self.linear = nn.Linear(input_dim, out_dim) self.ap_layer = AttentivePooling(out_dim) def forward(self, feature_BxTxH, att_mask_BxT): """ Arguments feature_BxTxH - [BxTxH] Acoustic feature with shape att_mask_BxT - [BxT] Attention Mask logits """ feature_BxTxH = self.linear(feature_BxTxH) sap_vec, att_w = self.ap_layer(feature_BxTxH, att_mask_BxT) variance = torch.sqrt(torch.sum(att_w * feature_BxTxH * feature_BxTxH, dim=1) - sap_vec ** 2 + 1e-08) statistic_pooling = torch.cat([sap_vec, variance], dim=-1) return statistic_pooling def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
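Written out in the docstring's notation, the variance line in Model.forward implements the usual attentive-statistics identity (a reading of the code, with \alpha_t the softmax weights att_w and \mu the pooled mean sap_vec):

\mu = \sum_t \alpha_t h_t, \qquad \sigma = \sqrt{\sum_t \alpha_t \,(h_t \odot h_t) - \mu \odot \mu + 10^{-8}}

i.e. the weighted second moment minus the squared weighted mean, with 1e-08 added before the square root for numerical stability.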
LinearDiag
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/2w/c2wdecu57d6a6kpjohz37lvi6a425csunsyc44s565n66u3hny6i.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mul] # Source node to ATen node mapping: # out => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim import torch.nn.parallel class LinearDiag(nn.Module): def __init__(self, num_features, bias=False): super(LinearDiag, self).__init__() weight = torch.FloatTensor(num_features).fill_(1) self.weight = nn.Parameter(weight, requires_grad=True) if bias: bias = torch.FloatTensor(num_features).fill_(0) self.bias = nn.Parameter(bias, requires_grad=True) else: self.register_parameter('bias', None) def forward(self, X): assert X.dim() == 2 and X.size(1) == self.weight.size(0) out = X * self.weight.expand_as(X) if self.bias is not None: out = out + self.bias.expand_as(out) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
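An illustrative sketch (not from the record): LinearDiag scales each feature by a learned per-feature weight, which is exactly multiplication by a diagonal matrix, X @ diag(w):

import torch

layer = LinearDiag(num_features=4)
with torch.no_grad():
    layer.weight.copy_(torch.arange(1.0, 5.0))   # make the unit weights nontrivial
X = torch.rand(4, 4)
torch.testing.assert_close(layer(X), X @ torch.diag(layer.weight))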
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return buf0, primals_1 class LinearDiagNew(nn.Module): def __init__(self, num_features, bias=False): super(LinearDiagNew, self).__init__() weight = torch.FloatTensor(num_features).fill_(1) self.weight = nn.Parameter(weight, requires_grad=True) if bias: bias = torch.FloatTensor(num_features).fill_(0) self.bias = nn.Parameter(bias, requires_grad=True) else: self.register_parameter('bias', None) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
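A hypothetical direct launch of the fused kernel above, bypassing call(); assumptions: a CUDA device, contiguous (4, 4) inputs, and a one-program grid, since XBLOCK=16 already covers all 16 elements:

import torch

x, w = torch.rand(4, 4, device='cuda'), torch.rand(4, device='cuda')
out = torch.empty_like(x)
triton_poi_fused_mul_0[(1,)](x, w, out, 16, XBLOCK=16)
# x2 walks all 16 elements; x0 = x2 % 4 picks the per-column weight,
# so the kernel computes the same broadcast product as x * w.
torch.testing.assert_close(out, x * w)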
Basasuya/FewShotWithoutForgetting
LinearDiag
false
2,007
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
import torch import torch.nn as nn import torch.optim import torch.nn.parallel class Model(nn.Module): def __init__(self, num_features, bias=False): super().__init__() weight = torch.FloatTensor(num_features).fill_(1) self.weight = nn.Parameter(weight, requires_grad=True) if bias: bias = torch.FloatTensor(num_features).fill_(0) self.bias = nn.Parameter(bias, requires_grad=True) else: self.register_parameter('bias', None) def forward(self, X): assert X.dim() == 2 and X.size(1) == self.weight.size(0) out = X * self.weight.expand_as(X) if self.bias is not None: out = out + self.bias.expand_as(out) return out def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4]
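The dataset inputs only exercise bias=False; a hypothetical sketch of the bias=True branch (out = X * w + b, with both parameters broadcast over the feature dimension):

import torch

m = Model(num_features=4, bias=True)
X = torch.rand(4, 4)
torch.testing.assert_close(m(X), X * m.weight + m.bias)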
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/fl/cfl47w3jenzjato5bn5xjoj5tva4lgt6os2dcyaczcbvy2k2fx2y.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat] # Source node to ATen node mapping: # logits => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze, %squeeze_1],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) x0 = xindex % 16 x2 = xindex tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, 
tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1)), tmp4 & xmask, other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + (x0 + (16*((-4) + x1))), tmp11 & xmask, other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, ), (1, )) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear] buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf1 = buf0 del buf0 # Topologically Sorted Source Nodes: [bilinear_1], Original ATen: [aten._trilinear] buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((8, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf1, primals_4, buf3, buf4, 128, grid=grid(128), stream=stream0) del buf1 del buf3 del primals_4 return (buf4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Discriminator(nn.Module): """ The discriminator .. math:: \\begin{equation} \\mathcal{D}\\left(\\mathbf{h}_{i}^{(r)}, \\mathbf{s}^{(r)}\\right)=\\sigma\\left(\\mathbf{h}_{i}^{(r) T} \\mathbf{M}^{(r)} \\mathbf{s}^{(r)}\\right) \\end{equation} where :math:`M^{(r)}` is a trainable scoring matrix. """ def __init__(self, n_h): super(Discriminator, self).__init__() self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None): c_x = c.expand_as(h_pl) sc_1 = torch.squeeze(self.f_k_bilinear(h_pl, c_x), 1) sc_2 = torch.squeeze(self.f_k_bilinear(h_mi, c_x), 1) if s_bias1 is not None: sc_1 += s_bias1 if s_bias2 is not None: sc_2 += s_bias2 logits = torch.cat((sc_1, sc_2), 0) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_h': 4}]
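An illustrative shape check (not part of the record): with the 4x4x4x4 tensors from get_inputs(), squeeze(dim=1) is a no-op (that dim has size 4), so each bilinear score keeps shape (4, 4, 4, 1) and the cat along dim 0 stacks positive and negative scores, matching the (8, 4, 4, 1) buf4 in the compiled graph:

import torch

d = Discriminator(n_h=4)
c, h_pl, h_mi = [torch.rand(4, 4, 4, 4) for _ in range(3)]
print(d(c, h_pl, h_mi).shape)  # torch.Size([8, 4, 4, 1])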
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 x0 = xindex % 16 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1), tmp4 & xmask, other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-4 + x1)), tmp11 & xmask, other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_2, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor( primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf1 = buf0 del buf0 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_5, (64, 4), (4, 1), 0), primals_3, reinterpret_tensor( primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((8, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf1, primals_4, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 del primals_4 return buf4, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0) class DiscriminatorNew(nn.Module): """ The discriminator .. math:: \\begin{equation} \\mathcal{D}\\left(\\mathbf{h}_{i}^{(r)}, \\mathbf{s}^{(r)}\\right)=\\sigma\\left(\\mathbf{h}_{i}^{(r) T} \\mathbf{M}^{(r)} \\mathbf{s}^{(r)}\\right) \\end{equation} where :math:`M^{(r)}` is a trainable scoring matrix. """ def __init__(self, n_h): super(DiscriminatorNew, self).__init__() self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1, input_2): primals_3 = self.f_k_bilinear.weight primals_4 = self.f_k_bilinear.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BUPTlfq/OpenHGNN
Discriminator
false
2,008
[ "Apache-2.0" ]
0
77041e68c33a8a42a2c187c6e42d85b81cbb25d3
https://github.com/BUPTlfq/OpenHGNN/tree/77041e68c33a8a42a2c187c6e42d85b81cbb25d3
import torch import torch.nn as nn class Model(nn.Module): """ The discriminator .. math:: \\begin{equation} \\mathcal{D}\\left(\\mathbf{h}_{i}^{(r)}, \\mathbf{s}^{(r)}\\right)=\\sigma\\left(\\mathbf{h}_{i}^{(r) T} \\mathbf{M}^{(r)} \\mathbf{s}^{(r)}\\right) \\end{equation} where :math:`M^{(r)}` is a trainable scoring matrix. """ def __init__(self, n_h): super().__init__() self.f_k_bilinear = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None): c_x = c.expand_as(h_pl) sc_1 = torch.squeeze(self.f_k_bilinear(h_pl, c_x), 1) sc_2 = torch.squeeze(self.f_k_bilinear(h_mi, c_x), 1) if s_bias1 is not None: sc_1 += s_bias1 if s_bias2 is not None: sc_2 += s_bias2 logits = torch.cat((sc_1, sc_2), 0) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [4]
SpatialPyramidPooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/vi/cvih4u25oo4wxubp2b2ew7b4rm3k7js5c5bl6pzxtrnzdkaxor7e.py # Topologically Sorted Source Nodes: [max_pool2d_2, features], Original ATen: [aten.max_pool2d_with_indices, aten.cat] # Source node to ATen node mapping: # features => cat # max_pool2d_2 => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [5, 5], [1, 1], [2, 2], [1, 1], False), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4, %arg0_1], 1), kwargs = {}) triton_poi_fused_cat_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_cat_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 26, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x7 = xindex x3 = (xindex // 64) x4 = xindex % 64 tmp116 = tl.load(in_ptr0 + (x7), xmask) tmp0 = (-2) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-2) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-10) + x7), tmp10 & xmask, other=float("-inf")) tmp12 = (-1) + x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-9) + x7), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-8) + x7), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 1 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + ((-7) + x7), tmp30 & xmask, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = 2 + x0 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp5 & tmp36 tmp38 = tl.load(in_ptr0 + ((-6) + x7), tmp37 & xmask, other=float("-inf")) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = (-1) + x1 tmp41 = tmp40 >= tmp1 tmp42 = tmp40 < tmp3 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp9 tmp45 = tl.load(in_ptr0 + ((-6) + x7), tmp44 & xmask, other=float("-inf")) tmp46 = triton_helpers.maximum(tmp45, tmp39) tmp47 = tmp43 & tmp15 tmp48 = tl.load(in_ptr0 + ((-5) + x7), tmp47 & xmask, other=float("-inf")) tmp49 = triton_helpers.maximum(tmp48, tmp46) tmp50 = tmp43 & tmp22 tmp51 = tl.load(in_ptr0 + ((-4) + x7), tmp50 & xmask, other=float("-inf")) tmp52 = triton_helpers.maximum(tmp51, tmp49) tmp53 = tmp43 & tmp29 tmp54 = tl.load(in_ptr0 + ((-3) + x7), tmp53 & xmask, other=float("-inf")) tmp55 = triton_helpers.maximum(tmp54, tmp52) tmp56 = tmp43 & tmp36 tmp57 = tl.load(in_ptr0 + ((-2) + x7), tmp56 & xmask, other=float("-inf")) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = x1 tmp60 = tmp59 >= tmp1 tmp61 = tmp59 < tmp3 tmp62 = tmp60 & tmp61 tmp63 = tmp62 & tmp9 tmp64 = tl.load(in_ptr0 + ((-2) + x7), tmp63 & xmask, other=float("-inf")) tmp65 = triton_helpers.maximum(tmp64, tmp58) tmp66 = tmp62 & tmp15 tmp67 = tl.load(in_ptr0 + ((-1) + x7), tmp66 & xmask, other=float("-inf")) tmp68 = triton_helpers.maximum(tmp67, tmp65) tmp69 = tmp62 & tmp22 tmp70 = tl.load(in_ptr0 + (x7), tmp69 & xmask, other=float("-inf")) tmp71 = triton_helpers.maximum(tmp70, tmp68) tmp72 = tmp62 & tmp29 tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float("-inf")) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp62 & tmp36 tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float("-inf")) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = 1 + x1 tmp79 = tmp78 >= tmp1 tmp80 = tmp78 < tmp3 tmp81 = tmp79 & tmp80 tmp82 = tmp81 & tmp9 tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float("-inf")) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp81 & tmp15 tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float("-inf")) tmp87 = triton_helpers.maximum(tmp86, tmp84) tmp88 = tmp81 & tmp22 tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, 
other=float("-inf")) tmp90 = triton_helpers.maximum(tmp89, tmp87) tmp91 = tmp81 & tmp29 tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float("-inf")) tmp93 = triton_helpers.maximum(tmp92, tmp90) tmp94 = tmp81 & tmp36 tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float("-inf")) tmp96 = triton_helpers.maximum(tmp95, tmp93) tmp97 = 2 + x1 tmp98 = tmp97 >= tmp1 tmp99 = tmp97 < tmp3 tmp100 = tmp98 & tmp99 tmp101 = tmp100 & tmp9 tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float("-inf")) tmp103 = triton_helpers.maximum(tmp102, tmp96) tmp104 = tmp100 & tmp15 tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float("-inf")) tmp106 = triton_helpers.maximum(tmp105, tmp103) tmp107 = tmp100 & tmp22 tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float("-inf")) tmp109 = triton_helpers.maximum(tmp108, tmp106) tmp110 = tmp100 & tmp29 tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float("-inf")) tmp112 = triton_helpers.maximum(tmp111, tmp109) tmp113 = tmp100 & tmp36 tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float("-inf")) tmp115 = triton_helpers.maximum(tmp114, tmp112) tl.store(out_ptr0 + (x4 + (256*x3)), tmp115, xmask) tl.store(out_ptr1 + (x4 + (256*x3)), tmp116, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/wu/cwuiwj6jpv44elf77w3mkg3fk7wv6xwrv2wzcyipfdadqsgr6dzt.py # Topologically Sorted Source Nodes: [features], Original ATen: [aten.cat] # Source node to ATen node mapping: # features => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2, %getitem_4, %arg0_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (256*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices] buf0 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13, 13], [1, 1], [6, 6]) buf1 = buf0[0] del buf0 # Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices] buf3 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9], [1, 1], [4, 4]) buf4 = buf3[0] del buf3 buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32) buf6 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 128) # alias buf9 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 192) # alias # Topologically Sorted Source Nodes: [max_pool2d_2, features], Original ATen: [aten.max_pool2d_with_indices, aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_max_pool2d_with_indices_0.run(arg0_1, buf6, buf9, 256, grid=grid(256), stream=stream0) del arg0_1 buf7 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [features], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf1, buf7, 256, grid=grid(256), stream=stream0) del buf1 buf8 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [features], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(buf4, buf8, 256, grid=grid(256), stream=stream0) del buf4 return (buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class SpatialPyramidPooling(nn.Module): def __init__(self, pool_sizes=[5, 9, 13]): super(SpatialPyramidPooling, self).__init__() self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size // 2) for pool_size in pool_sizes]) def forward(self, x): features = [maxpool(x) for maxpool in self.maxpools[::-1]] features = torch.cat(features + [x], dim=1) return features def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
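A small sketch (not from the record) of why the compiled graph allocates a (4, 16, 4, 4) output: stride 1 with padding k // 2 preserves H and W for every odd kernel size, so the concatenation only multiplies channels, C_out = C_in * (len(pool_sizes) + 1) = 4 * 4:

import torch

spp = SpatialPyramidPooling()            # default pool_sizes=[5, 9, 13]
x = torch.rand(4, 4, 4, 4)
print(spp(x).shape)  # torch.Size([4, 16, 4, 4])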
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x7 = xindex x3 = xindex // 64 x4 = xindex % 64 tmp116 = tl.load(in_ptr0 + x7, xmask) tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -2 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-10 + x7), tmp10 & xmask, other=float('-inf')) tmp12 = -1 + x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-9 + x7), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-8 + x7), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 1 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-7 + x7), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = 2 + x0 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp5 & tmp36 tmp38 = tl.load(in_ptr0 + (-6 + x7), tmp37 & xmask, other=float('-inf')) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = -1 + x1 tmp41 = tmp40 >= tmp1 tmp42 = tmp40 < tmp3 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp9 tmp45 = tl.load(in_ptr0 + (-6 + x7), tmp44 & xmask, other=float('-inf')) tmp46 = triton_helpers.maximum(tmp45, tmp39) tmp47 = tmp43 & tmp15 tmp48 = tl.load(in_ptr0 + (-5 + x7), tmp47 & xmask, other=float('-inf')) tmp49 = triton_helpers.maximum(tmp48, tmp46) tmp50 = tmp43 & tmp22 tmp51 = tl.load(in_ptr0 + (-4 + x7), tmp50 & xmask, other=float('-inf')) tmp52 = triton_helpers.maximum(tmp51, tmp49) tmp53 = tmp43 & tmp29 tmp54 = tl.load(in_ptr0 + (-3 + x7), tmp53 & xmask, other=float('-inf')) tmp55 = triton_helpers.maximum(tmp54, tmp52) tmp56 = tmp43 & tmp36 tmp57 = tl.load(in_ptr0 + (-2 + x7), tmp56 & xmask, other=float('-inf')) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = x1 tmp60 = tmp59 >= tmp1 tmp61 = tmp59 < tmp3 tmp62 = tmp60 & tmp61 tmp63 = tmp62 & tmp9 tmp64 = tl.load(in_ptr0 + (-2 + x7), tmp63 & xmask, other=float('-inf')) tmp65 = triton_helpers.maximum(tmp64, tmp58) tmp66 = tmp62 & tmp15 tmp67 = tl.load(in_ptr0 + (-1 + x7), tmp66 & xmask, other=float('-inf')) tmp68 = triton_helpers.maximum(tmp67, tmp65) tmp69 = tmp62 & tmp22 tmp70 = tl.load(in_ptr0 + x7, tmp69 & xmask, other=float('-inf')) tmp71 = triton_helpers.maximum(tmp70, tmp68) tmp72 = tmp62 & tmp29 tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float('-inf')) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp62 & tmp36 tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float('-inf')) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = 1 + x1 tmp79 
= tmp78 >= tmp1 tmp80 = tmp78 < tmp3 tmp81 = tmp79 & tmp80 tmp82 = tmp81 & tmp9 tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float('-inf')) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp81 & tmp15 tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float('-inf')) tmp87 = triton_helpers.maximum(tmp86, tmp84) tmp88 = tmp81 & tmp22 tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, other=float('-inf')) tmp90 = triton_helpers.maximum(tmp89, tmp87) tmp91 = tmp81 & tmp29 tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float('-inf')) tmp93 = triton_helpers.maximum(tmp92, tmp90) tmp94 = tmp81 & tmp36 tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float('-inf')) tmp96 = triton_helpers.maximum(tmp95, tmp93) tmp97 = 2 + x1 tmp98 = tmp97 >= tmp1 tmp99 = tmp97 < tmp3 tmp100 = tmp98 & tmp99 tmp101 = tmp100 & tmp9 tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float('-inf')) tmp103 = triton_helpers.maximum(tmp102, tmp96) tmp104 = tmp100 & tmp15 tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float('-inf')) tmp106 = triton_helpers.maximum(tmp105, tmp103) tmp107 = tmp100 & tmp22 tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float('-inf')) tmp109 = triton_helpers.maximum(tmp108, tmp106) tmp110 = tmp100 & tmp29 tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float('-inf')) tmp112 = triton_helpers.maximum(tmp111, tmp109) tmp113 = tmp100 & tmp36 tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float('-inf')) tmp115 = triton_helpers.maximum(tmp114, tmp112) tl.store(out_ptr0 + (x4 + 256 * x3), tmp115, xmask) tl.store(out_ptr1 + (x4 + 256 * x3), tmp116, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 256 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13, 13], [1, 1], [6, 6]) buf1 = buf0[0] del buf0 buf3 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9 ], [1, 1], [4, 4]) buf4 = buf3[0] del buf3 buf10 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch. float32) buf6 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 128) buf9 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 192) get_raw_stream(0) triton_poi_fused_cat_max_pool2d_with_indices_0[grid(256)](arg0_1, buf6, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf7 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](buf1, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf8 = reinterpret_tensor(buf10, (4, 4, 4, 4), (256, 16, 4, 1), 64) triton_poi_fused_cat_1[grid(256)](buf4, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 return buf10, class SpatialPyramidPoolingNew(nn.Module): def __init__(self, pool_sizes=[5, 9, 13]): super(SpatialPyramidPoolingNew, self).__init__() self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size // 2) for pool_size in pool_sizes]) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BDeMo/yolov4-pytorch
SpatialPyramidPooling
false
2,009
[ "MIT" ]
0
2434afc88d0890bdb19c5655bb7c577d22bf18d3
https://github.com/BDeMo/yolov4-pytorch/tree/2434afc88d0890bdb19c5655bb7c577d22bf18d3
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, pool_sizes=[5, 9, 13]): super().__init__() self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size // 2) for pool_size in pool_sizes]) def forward(self, x): features = [maxpool(x) for maxpool in self.maxpools[::-1]] features = torch.cat(features + [x], dim=1) return features def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
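One detail worth verifying (a sketch, not from the record): the [::-1] slice reverses the ModuleList, so the 13x13 pool is applied first, which is why the compiled call() above runs max_pool2d_with_indices with [13, 13] into the channel block at offset 0:

import torch
import torch.nn as nn

m = Model()
x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(m(x)[:, :4], nn.MaxPool2d(13, 1, 6)(x))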
RelationCrossing
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/te/ctez6v53ld4k45ykiwq5ndlpz3jrny2atstdxadog7gcnit65abw.py # Topologically Sorted Source Nodes: [mul, dsttype_node_relation_attention], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # dsttype_node_relation_attention => sum_1 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %arg1_1), kwargs = {}) # %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1], True), kwargs = {}) triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2i/c2ir37xoljdhwkg75h2sbixyvepyluu4socgnd4wiw7oq57nugu4.py # Topologically Sorted Source Nodes: [leaky_relu, dsttype_node_relation_attention_1], Original ATen: [aten.leaky_relu, aten._softmax] # Source node to ATen node mapping: # dsttype_node_relation_attention_1 => amax, exp, sub # leaky_relu => gt, mul_1, where # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sum_1, %mul_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_leaky_relu_1 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp11 = tmp10 > tmp1 tmp12 = tmp10 * tmp3 tmp13 = tl.where(tmp11, tmp10, tmp12) tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 > tmp1 tmp17 = tmp15 * tmp3 tmp18 = tl.where(tmp16, tmp15, tmp17) tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp21 = tmp20 > tmp1 tmp22 = tmp20 * tmp3 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = triton_helpers.maximum(tmp19, tmp23) tmp25 = tmp5 - tmp24 tmp26 = tl_math.exp(tmp25) tl.store(out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/f5/cf5347orlkm5ylmh4iouw6qvowwebewwg66dnqrhzt7tg7a4irp5.py # Topologically Sorted Source Nodes: [dsttype_node_relation_attention_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dsttype_node_relation_attention_1 => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), 
xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/af/cafnizcuehobeophh3e6mmdetxkitfomc7vxb4m5zc4zqmm42u5i.py # Topologically Sorted Source Nodes: [dsttype_node_relation_attention_1, mul_1, dsttype_node_features_1], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # dsttype_node_features_1 => sum_3 # dsttype_node_relation_attention_1 => div, sum_2 # mul_1 => mul_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [0]), kwargs = {}) triton_poi_fused__softmax_mul_sum_3 = async_compile.triton('triton_poi_fused__softmax_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x2), xmask) tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x2), xmask) tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x2), xmask) tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = 
args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [mul, dsttype_node_relation_attention], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [leaky_relu, dsttype_node_relation_attention_1], Original ATen: [aten.leaky_relu, aten._softmax] triton_poi_fused__softmax_leaky_relu_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [dsttype_node_relation_attention_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [dsttype_node_relation_attention_1, mul_1, dsttype_node_features_1], Original ATen: [aten._softmax, aten.mul, aten.sum] triton_poi_fused__softmax_mul_sum_3.run(arg0_1, buf2, buf3, 64, grid=grid(64), stream=stream0) del arg0_1 del buf2 return (reinterpret_tensor(buf3, (4, 16), (16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
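A minimal driver sketch for the AOT module above, assuming it has been saved verbatim as relation_crossing_aot.py (a hypothetical file name) and a CUDA device is available. Note that call() empties the argument list it receives, so a fresh list must be passed on every invocation.

import torch
from relation_crossing_aot import call, benchmark_compiled_module  # hypothetical module name

feats = torch.rand(4, 4, 4, 4, device='cuda')    # shapes fixed by the stride guards in call()
attn_w = torch.rand(4, 4, 4, 4, device='cuda')
out, = call([feats, attn_w])                     # returns a 1-tuple; out has shape (4, 16)
print(out.shape)

benchmark_compiled_module(times=10, repeat=10)   # the module's built-in timing harness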
import torch import torch.nn as nn import torch.nn.functional as F class RelationCrossing(nn.Module): def __init__(self, in_feats: 'int', out_feats: 'int', num_heads: 'int', dropout: 'float'=0.0, negative_slope: 'float'=0.2): """ Relation crossing layer Parameters ---------- in_feats : pair of ints, input feature size out_feats : int, output feature size num_heads : int, number of heads in Multi-Head Attention dropout : float, optional, dropout rate, defaults: 0.0 negative_slope : float, optional, negative slope rate, defaults: 0.2 """ super(RelationCrossing, self).__init__() self._in_feats = in_feats self._out_feats = out_feats self._num_heads = num_heads self.dropout = nn.Dropout(dropout) self.leaky_relu = nn.LeakyReLU(negative_slope) def forward(self, dsttype_node_features: 'torch.Tensor', relations_crossing_attention_weight: 'nn.Parameter'): """ :param dsttype_node_features: a tensor of (dsttype_node_relations_num, num_dst_nodes, n_heads * hidden_dim) :param relations_crossing_attention_weight: Parameter the shape is (n_heads, hidden_dim) :return: output_features: a Tensor """ if len(dsttype_node_features) == 1: dsttype_node_features = dsttype_node_features.squeeze(dim=0) else: dsttype_node_features = dsttype_node_features.reshape( dsttype_node_features.shape[0], -1, self._num_heads, self. _out_feats) dsttype_node_relation_attention = (dsttype_node_features * relations_crossing_attention_weight).sum(dim=-1, keepdim=True) dsttype_node_relation_attention = F.softmax(self.leaky_relu( dsttype_node_relation_attention), dim=0) dsttype_node_features = (dsttype_node_features * dsttype_node_relation_attention).sum(dim=0) dsttype_node_features = self.dropout(dsttype_node_features) dsttype_node_features = dsttype_node_features.reshape(-1, self. _num_heads * self._out_feats) return dsttype_node_features def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_feats': 4, 'out_feats': 4, 'num_heads': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tmp7 = tmp6 > tmp1 tmp8 = tmp6 * tmp3 tmp9 = tl.where(tmp7, tmp6, tmp8) tmp11 = tmp10 > tmp1 tmp12 = tmp10 * tmp3 tmp13 = tl.where(tmp11, tmp10, tmp12) tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 > tmp1 tmp17 = tmp15 * tmp3 tmp18 = tl.where(tmp16, tmp15, tmp17) tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp21 = tmp20 > tmp1 tmp22 = tmp20 * tmp3 tmp23 = tl.where(tmp21, tmp20, tmp22) tmp24 = triton_helpers.maximum(tmp19, tmp23) tmp25 = tmp5 - tmp24 tmp26 = tl_math.exp(tmp25) tl.store(out_ptr0 + x2, tmp26, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + x2), xmask) tmp4 = tl.load(in_ptr1 + (16 + x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (128 + x2), xmask) tmp8 = tl.load(in_ptr1 + (32 + x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (192 + x2), xmask) tmp12 = tl.load(in_ptr1 + (48 + x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_leaky_relu_1[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused__softmax_mul_sum_3[grid(64)](arg0_1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del buf2 return reinterpret_tensor(buf3, (4, 16), (16, 1), 0), class RelationCrossingNew(nn.Module): def __init__(self, in_feats: 'int', out_feats: 'int', num_heads: 'int', dropout: 'float'=0.0, negative_slope: 'float'=0.2): """ Relation crossing layer Parameters ---------- in_feats : pair of ints, input feature size out_feats : int, output feature size num_heads : int, number of heads in Multi-Head Attention dropout : float, optional, dropout rate, defaults: 0.0 negative_slope : float, optional, negative slope rate, defaults: 0.2 """ super(RelationCrossingNew, self).__init__() self._in_feats = in_feats self._out_feats = out_feats self._num_heads = num_heads self.dropout = nn.Dropout(dropout) self.leaky_relu = nn.LeakyReLU(negative_slope) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
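The cleaned listing above drops the wrapper comments and the benchmark harness but keeps the same four jitted kernels, now launched with explicit XBLOCK/num_warps. As a sanity check, the first kernel can be driven directly; a sketch assuming triton_poi_fused_mul_sum_0 from this record is in scope and CUDA is available:

import torch
from torch._inductor.runtime.triton_heuristics import grid

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
out = torch.empty((4, 4, 4, 1), device='cuda')
# 64 output elements; each is a dot product of a and b over the last dim
triton_poi_fused_mul_sum_0[grid(64)](a, b, out, 64, XBLOCK=64, num_warps=1, num_stages=1)
torch.testing.assert_close(out, (a * b).sum(dim=-1, keepdim=True))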
BUPTlfq/OpenHGNN
RelationCrossing
false
2010
[ "Apache-2.0" ]
0
77041e68c33a8a42a2c187c6e42d85b81cbb25d3
https://github.com/BUPTlfq/OpenHGNN/tree/77041e68c33a8a42a2c187c6e42d85b81cbb25d3
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_feats: 'int', out_feats: 'int', num_heads: 'int', dropout: 'float'=0.0, negative_slope: 'float'=0.2): """ Relation crossing layer Parameters ---------- in_feats : pair of ints, input feature size out_feats : int, output feature size num_heads : int, number of heads in Multi-Head Attention dropout : float, optional, dropout rate, defaults: 0.0 negative_slope : float, optional, negative slope rate, defaults: 0.2 """ super().__init__() self._in_feats = in_feats self._out_feats = out_feats self._num_heads = num_heads self.dropout = nn.Dropout(dropout) self.leaky_relu = nn.LeakyReLU(negative_slope) def forward(self, dsttype_node_features: 'torch.Tensor', relations_crossing_attention_weight: 'nn.Parameter'): """ :param dsttype_node_features: a tensor of (dsttype_node_relations_num, num_dst_nodes, n_heads * hidden_dim) :param relations_crossing_attention_weight: Parameter the shape is (n_heads, hidden_dim) :return: output_features: a Tensor """ if len(dsttype_node_features) == 1: dsttype_node_features = dsttype_node_features.squeeze(dim=0) else: dsttype_node_features = dsttype_node_features.reshape( dsttype_node_features.shape[0], -1, self._num_heads, self. _out_feats) dsttype_node_relation_attention = (dsttype_node_features * relations_crossing_attention_weight).sum(dim=-1, keepdim=True) dsttype_node_relation_attention = F.softmax(self.leaky_relu( dsttype_node_relation_attention), dim=0) dsttype_node_features = (dsttype_node_features * dsttype_node_relation_attention).sum(dim=0) dsttype_node_features = self.dropout(dsttype_node_features) dsttype_node_features = dsttype_node_features.reshape(-1, self. _num_heads * self._out_feats) return dsttype_node_features def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
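A parity-check sketch for this record, assuming the two class definitions above (RelationCrossing and RelationCrossingNew) are in scope and CUDA is present. Dropout defaults to 0.0 and neither module holds parameters, so eval-mode outputs should agree up to float tolerance:

import torch

torch.manual_seed(0)
eager = RelationCrossing(in_feats=4, out_feats=4, num_heads=4).cuda().eval()
fused = RelationCrossingNew(in_feats=4, out_feats=4, num_heads=4).cuda().eval()
feats = torch.rand(4, 4, 4, 4, device='cuda')    # contiguous, as the stride guards require
attn_w = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(feats, attn_w), eager(feats, attn_w),
                               rtol=1e-4, atol=1e-4)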
LayerNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hm/chmguiko7olkc2jornwq2r5upocnkz3x3wp75lbdsdsrp7szyzy7.py # Topologically Sorted Source Nodes: [means, sub, stds, activation], Original ATen: [aten.mean, aten.sub, aten.std, aten.div] # Source node to ATen node mapping: # activation => div # means => mean # stds => sqrt, var # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) triton_poi_fused_div_mean_std_sub_0 = async_compile.triton('triton_poi_fused_div_mean_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mean_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp10 / tmp24 tl.store(out_ptr0 + (x3), tmp25, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [means, sub, stds, activation], Original ATen: [aten.mean, aten.sub, aten.std, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_mean_std_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, *args): super().__init__() def forward(self, activation): if len(activation.size()) == 3: ori_size = activation.size() activation = activation.view(-1, activation.size(-1)) else: ori_size = None means = torch.mean(activation, dim=1, keepdim=True) stds = torch.std(activation, dim=1, keepdim=True) activation = (activation - means) / stds if ori_size is not None: activation = activation.view(ori_size) return activation def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = tmp10 / tmp24 tl.store(out_ptr0 + x3, tmp25, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class LayerNormNew(nn.Module): def __init__(self, *args): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BaiYuhaoSpiceeYJ/SEGAN_denoise
LayerNorm
false
2011
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, *args): super().__init__() def forward(self, activation): if len(activation.size()) == 3: ori_size = activation.size() activation = activation.view(-1, activation.size(-1)) else: ori_size = None means = torch.mean(activation, dim=1, keepdim=True) stds = torch.std(activation, dim=1, keepdim=True) activation = (activation - means) / stds if ori_size is not None: activation = activation.view(ori_size) return activation def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
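What the single fused kernel computes, written eagerly: standardization over dim=1 using a Bessel-corrected std (the kernel sums squared deviations and divides by 3.0 for the 4 samples along that dim, then takes the sqrt). A CPU-only sketch with the shapes from get_inputs():

import torch

x = torch.rand(4, 4, 4, 4)
means = x.mean(dim=1, keepdim=True)
stds = x.std(dim=1, keepdim=True)       # unbiased by default: divides by N - 1 = 3
ref = (x - means) / stds

# the same arithmetic the kernel unrolls by hand
manual = (x - means) / (((x - means) ** 2).sum(dim=1, keepdim=True) / 3.0).sqrt()
torch.testing.assert_close(manual, ref)

Note the module applies no learned affine and no eps, so an input that is constant along dim=1 would divide by zero.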
FeatExemplarAvgBlock
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hp/chptg55ofbvcnhmdfgvcjqku736b3xcyxeahxk2eb2jpz7f2wey2.py # Topologically Sorted Source Nodes: [weight_novel_1], Original ATen: [aten.div] # Source node to ATen node mapping: # weight_novel_1 => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weight_novel], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), arg1_1, out=buf0) del arg1_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [weight_novel_1], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf1, arg0_1, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim import torch.nn.parallel class FeatExemplarAvgBlock(nn.Module): def __init__(self, nFeat): super(FeatExemplarAvgBlock, self).__init__() def forward(self, features_train, labels_train): labels_train_transposed = labels_train.transpose(1, 2) weight_novel = torch.bmm(labels_train_transposed, features_train) weight_novel = weight_novel.div(labels_train_transposed.sum(dim=2, keepdim=True).expand_as(weight_novel)) return weight_novel def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'nFeat': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + x3, tmp8, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), arg1_1, out=buf0) del arg1_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf1, class FeatExemplarAvgBlockNew(nn.Module): def __init__(self, nFeat): super(FeatExemplarAvgBlockNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Basasuya/FewShotWithoutForgetting
FeatExemplarAvgBlock
false
2012
[ "MIT" ]
0
eecc70e416ed82999124ddfca1b145f6dbcd74a6
https://github.com/Basasuya/FewShotWithoutForgetting/tree/eecc70e416ed82999124ddfca1b145f6dbcd74a6
import torch import torch.nn as nn import torch.optim import torch.nn.parallel class Model(nn.Module): def __init__(self, nFeat): super().__init__() def forward(self, features_train, labels_train): labels_train_transposed = labels_train.transpose(1, 2) weight_novel = torch.bmm(labels_train_transposed, features_train) weight_novel = weight_novel.div(labels_train_transposed.sum(dim=2, keepdim=True).expand_as(weight_novel)) return weight_novel def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
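What the bmm + div pair implements: per-class feature averaging. With one-hot labels_train of shape (B, N, K), weight_novel[b, k] is the mean feature vector of the training examples labelled k. A sketch with hypothetical sizes, constructed so every class appears at least once (otherwise the per-class count in the divisor would be zero):

import torch
import torch.nn.functional as F

B, N, K, D = 2, 6, 3, 5
features = torch.rand(B, N, D)
idx = (torch.arange(N) % K).expand(B, N)        # each class used N // K times
labels = F.one_hot(idx, K).float()              # (B, N, K) one-hot
lt = labels.transpose(1, 2)                     # (B, K, N)
w = torch.bmm(lt, features) / lt.sum(dim=2, keepdim=True)   # class sums -> class means

# cross-check one entry against an explicit mean
torch.testing.assert_close(w[0, 0], features[0, idx[0] == 0].mean(dim=0))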
CombFilter
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/4e/c4eq4lic2763lii3g56qatkonozcq5rienq4mbtevyedogylisol.py # Topologically Sorted Source Nodes: [x_p], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_p => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [4, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = (-4) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1)), 
tmp2 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 2), (8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_p], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 8), (0, 8, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(4,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) return (reinterpret_tensor(buf1, (4, 4), (4, 1), 0), primals_2, reinterpret_tensor(buf0, (1, 4, 8), (32, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 2), (8, 2, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class CombFilter(nn.Module): def __init__(self, ninputs, fmaps, L): super().__init__() self.L = L self.filt = nn.Conv1d(ninputs, fmaps, 2, dilation=L, bias=False) r_init_weight = torch.ones(ninputs * fmaps, 2) r_init_weight[:, 0] = torch.rand(r_init_weight.size(0)) self.filt.weight.data = r_init_weight.view(fmaps, ninputs, 2) def forward(self, x): x_p = F.pad(x, (self.L, 0)) y = self.filt(x_p) return y def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'ninputs': 4, 'fmaps': 4, 'L': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = -4 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 2), (8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(32)](primals_1, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 8 ), (0, 8, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(4,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (1, 4, 8), (32, 8, 1), 0) class CombFilterNew(nn.Module): def __init__(self, ninputs, fmaps, L): super().__init__() self.L = L self.filt = nn.Conv1d(ninputs, fmaps, 2, dilation=L, bias=False) r_init_weight = torch.ones(ninputs * fmaps, 2) r_init_weight[:, 0] = torch.rand(r_init_weight.size(0)) self.filt.weight.data = r_init_weight.view(fmaps, ninputs, 2) def forward(self, input_0): primals_2 = self.filt.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
BaiYuhaoSpiceeYJ/SEGAN_denoise
CombFilter
false
2013
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, ninputs, fmaps, L): super().__init__() self.L = L self.filt = nn.Conv1d(ninputs, fmaps, 2, dilation=L, bias=False) r_init_weight = torch.ones(ninputs * fmaps, 2) r_init_weight[:, 0] = torch.rand(r_init_weight.size(0)) self.filt.weight.data = r_init_weight.view(fmaps, ninputs, 2) def forward(self, x): x_p = F.pad(x, (self.L, 0)) y = self.filt(x_p) return y def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4, 4, 4]
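The pad + dilated-conv pair realizes a feed-forward comb filter, y[t] = w0 * x[t - L] + w1 * x[t], with x[t] taken as zero for t < 0. A minimal single-channel eager sketch (hypothetical taps and signal length, not taken from the record):

import torch
import torch.nn.functional as F

L, T = 4, 12
x = torch.rand(1, 1, T)
w = torch.tensor([[[0.3, 1.0]]])                # taps for x[t - L] and x[t]
y = F.conv1d(F.pad(x, (L, 0)), w, dilation=L)   # output length is again T

x_delayed = F.pad(x, (L, 0))[..., :T]           # x shifted right by L samples
torch.testing.assert_close(y, 0.3 * x_delayed + 1.0 * x)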
GatedLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/d6/cd6sqv523euqx343sscwea2kmp4ka25ufko2kk3g2vvsmhxpc4f2.py # Topologically Sorted Source Nodes: [tanh, sigmoid, mul], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 
256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh, sigmoid, mul], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import init as init class GatedLinear(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.lin1 = nn.Linear(in_ch, out_ch) self.lin2 = nn.Linear(in_ch, out_ch) self.sig = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, x): return self.tanh(self.lin1(x)) * self.sig(self.lin2(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 class GatedLinearNew(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.lin1 = nn.Linear(in_ch, out_ch) self.lin2 = nn.Linear(in_ch, out_ch) self.sig = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
BaekduChoi/Halftoning_v2
GatedLinear
false
2014
[ "BSD-3-Clause" ]
0
fdb7040e1a4044f23ef9c92757bbb90c23685afe
https://github.com/BaekduChoi/Halftoning_v2/tree/fdb7040e1a4044f23ef9c92757bbb90c23685afe
import torch from torch import nn from torch.nn import init as init class Model(nn.Module): def __init__(self, in_ch, out_ch): super().__init__() self.lin1 = nn.Linear(in_ch, out_ch) self.lin2 = nn.Linear(in_ch, out_ch) self.sig = nn.Sigmoid() self.tanh = nn.Tanh() def forward(self, x): return self.tanh(self.lin1(x)) * self.sig(self.lin2(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
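A note on the record above: the compiled path replaces the two nn.Linear calls with addmm on a flattened (64, 4) view and fuses tanh, sigmoid, and the elementwise product into one Triton kernel over all 256 elements. Below is a minimal equivalence sketch, assuming a CUDA device and that the GatedLinear and GatedLinearNew classes from the listings above are importable (the helper name is illustrative, not from the repo):

import torch

def check_gated_linear():
    # Hypothetical check: both classes come from the listings above.
    eager = GatedLinear(4, 4).cuda()
    fused = GatedLinearNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # identical weights/biases
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        ref = eager(x)   # tanh(lin1(x)) * sigmoid(lin2(x))
        out = fused(x)   # two addmm calls + one fused elementwise kernel
    # Same math, so the outputs should agree up to floating-point rounding.
    torch.testing.assert_close(out, ref)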
AttentionPool2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ov/covbryzjnff2kb26c5gkcqbvct6kdwzanlx3iu6ee24itsit76o3.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [0], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), 
xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3y/c3yxwtlf7q4oo5ov7royd257tecdxmuzqqag2za7buohahsb7mfs.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] # Source node to ATen node mapping: # x_1 => cat # x_2 => add # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %permute],), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %unsqueeze), kwargs = {}) triton_poi_fused_add_cat_1 = async_compile.triton('triton_poi_fused_add_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp15 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 17, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr1 + ((16*x3) + ((-1) + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + (x4), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/uc/cucgrga44gtlwzw6ehoy4jrwzm5fghn3ljf7iugmdjhe6m7mjcas.py # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_6, %primals_7, %primals_8],), kwargs = 
{}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((-8) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x0), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/gq/cgqswernq7km7s5e6iqvidwhg6nuo5b5zp6m5py3pnbrdzepe236.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%view_10, 1.0), kwargs = {}) # %permute_17 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_13, [0, 2, 1]), kwargs = {}) triton_poi_fused_mul_transpose_3 = async_compile.triton('triton_poi_fused_mul_transpose_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 32], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 
1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last') tmp1 = y0 tmp2 = tl.full([1, 1], 0, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tmp13 = tl.full([1, 1], 12, tl.int64) tmp14 = tmp1 < tmp13 tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-8) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/qz/cqzbvlh3tf752q5rj44eh5o54zbmctvajzgosqadhjdb5lbyqjy2.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose] # Source node to ATen node mapping: # multi_head_attention_forward => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_7, 1.0), kwargs = {}) # %permute_18 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_14, [0, 2, 1]), kwargs = {}) triton_poi_fused_mul_transpose_4 = async_compile.triton('triton_poi_fused_mul_transpose_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 32], 
tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_transpose_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + (16*x2)), xmask & ymask, eviction_policy='evict_last') tmp1 = 4 + y0 tmp2 = tl.full([1, 1], 0, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + (tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK])), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tmp13 = tl.full([1, 1], 12, tl.int64) tmp14 = tmp1 < tmp13 tmp15 = tl.load(in_ptr3 + (tl.broadcast_to((-4) + y0, [XBLOCK, YBLOCK])), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + (17*y3)), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + (16*x2)), tmp20, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/q4/cq45jdoeuvgpxv4zofa7tafzriebamhh2wkhnkrmvo3v75s7gbkj.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, any_1, div, eq, exp, full_default, logical_not, logical_not_1, sub, sum_1, where # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_15, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %eq : [num_users=1] = 
call_function[target=torch.ops.aten.eq.Scalar](args = (%view_15, -inf), kwargs = {}) # %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq,), kwargs = {}) # %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not, -1, True), kwargs = {}) # %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_1,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 17, 17], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_1, %full_default, %div), kwargs = {}) triton_per_fused__safe_softmax_5 = async_compile.triton('triton_per_fused__safe_softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[512, 32], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__safe_softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 272 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 68 x3 = (xindex // 68) tmp0 = tl.load(in_ptr0 + (r1 + (17*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = float("-inf") tmp12 = tmp0 == tmp11 tmp13 = tmp12 == 0 tmp14 = tmp13.to(tl.int64) tmp15 = (tmp14 != 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = triton_helpers.any(tmp18, 1)[:, None] tmp20 = tmp19 == 0 tmp21 = tmp6 / tmp10 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp22, tmp21) tl.store(out_ptr3 + (r1 + (17*x2) + (1184*x3)), tmp23, rmask & xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/hp/chpbwsfm77p7craq5ckxpabfykbcahcp5xaoqi5y3wp6bfgatgpn.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] # Source node to ATen node mapping: # multi_head_attention_forward => bmm_1 # Graph fragment: # %bmm_1 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%view_16, %view_17), kwargs = {}) triton_poi_fused_bmm_6 = async_compile.triton('triton_poi_fused_bmm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4624 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 289 x1 = (xindex // 289) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (289*(x1 % 4)) + (1184*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/qb/cqb3dcbu2gzzir2rbde24vaunu7kucgqq3ftpydndkry6kdjyyqy.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 17 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (17*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (17, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(primals_1, buf0, 16, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.cat, aten.add] triton_poi_fused_add_cat_1.run(buf0, primals_1, primals_2, buf1, 272, grid=grid(272), stream=stream0) del buf0 del primals_1 del primals_2 buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((12, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(primals_6, primals_7, primals_8, buf4, 12, grid=grid(12), stream=stream0) buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(reinterpret_tensor(buf4, (4, ), (1, ), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del buf4 buf6 = 
empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32) buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose] triton_poi_fused_mul_transpose_3.run(buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, grid=grid(16, 17), stream=stream0) buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0); del buf2 # reuse buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.transpose] triton_poi_fused_mul_transpose_4.run(buf3, primals_6, primals_7, primals_8, buf7, buf18, 16, 17, grid=grid(16, 17), stream=stream0) del buf3 del primals_6 del primals_7 del primals_8 buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8) buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._safe_softmax] triton_per_fused__safe_softmax_5.run(buf8, buf12, 272, 17, grid=grid(272), stream=stream0) buf13 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] triton_poi_fused_bmm_6.run(buf12, buf13, 4624, grid=grid(4624), stream=stream0) buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14) del buf13 buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf14, buf15, 17, 16, grid=grid(17, 16), stream=stream0) buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 return (reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((17, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
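An aside on triton_poi_fused_cat_2 above: it materializes in_proj_bias = cat([q_bias, k_bias, v_bias]) (length 12), and the addmm into buf5 then reads only the value slice via reinterpret_tensor(buf4, (4,), (1,), 8), i.e. an offset-8 view of that buffer. A hedged eager sketch of the same indexing (tensor values are arbitrary):

import torch

q_b, k_b, v_b = torch.rand(4), torch.rand(4), torch.rand(4)
in_proj_bias = torch.cat([q_b, k_b, v_b])   # what the cat kernel builds
# Offset 8 with stride 1 and length 4 is exactly the v_proj bias slice.
assert torch.equal(in_proj_bias[8:12], v_b)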
import torch import torch.nn.functional as F from torch import nn class AttentionPool2d(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute( 2, 0, 1) x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) x = x + self.positional_embedding[:, None, :] x, _ = F.multi_head_attention_forward(query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj. weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn= False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False) return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 17, tl.int64) tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = 4 + y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 272 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 68 x3 = xindex // 68 tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = float('-inf') tmp12 = tmp0 == tmp11 tmp13 = tmp12 == 0 tmp14 = tmp13.to(tl.int64) tmp15 = tmp14 != 0 
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = triton_helpers.any(tmp18, 1)[:, None] tmp20 = tmp19 == 0 tmp21 = tmp6 / tmp10 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp22, tmp21) tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask) @triton.jit def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4624 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 289 x1 = xindex // 289 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 17 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (17, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2, buf1, 272, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((12,), (1,), torch.float32) triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta =1, out=buf5) del buf4 buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32) buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32) triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0) del buf2 buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32) triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6, primals_7, 
primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) del buf3 del primals_6 del primals_7 del primals_8 buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8) buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32) triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17, XBLOCK=1, num_warps=2, num_stages=1) buf13 = buf8 del buf8 triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0) del buf7 extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14) del buf13 buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK =16, YBLOCK=32, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0) del buf14 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor( buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3 class AttentionPool2dNew(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, input_0): primals_2 = self.positional_embedding primals_3 = self.k_proj.weight primals_6 = self.k_proj.bias primals_4 = self.q_proj.weight primals_7 = self.q_proj.bias primals_5 = self.v_proj.weight primals_8 = self.v_proj.bias primals_9 = self.c_proj.weight primals_10 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
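The _safe_softmax lowering above differs from a plain softmax only on rows whose logits are all -inf: instead of propagating NaNs it writes zeros (tmp23 selects 0.0 whenever the any-finite reduction tmp19 is false). A quick eager illustration of the same rule:

import torch

row = torch.full((1, 17), float('-inf'))
plain = torch.softmax(row, dim=-1)               # all NaN: exp(-inf - (-inf))
mask = row.isneginf().all(dim=-1, keepdim=True)  # row has no finite entry
safe = torch.where(mask, torch.zeros_like(plain), plain)
assert torch.isnan(plain).all() and (safe == 0).all()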
Artanic30/RentalPrediction
AttentionPool2d
false
2015
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
import torch import torch.nn.functional as F from torch import nn class Model(nn.Module): def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute( 2, 0, 1) x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) x = x + self.positional_embedding[:, None, :] x, _ = F.multi_head_attention_forward(query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj. weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn= False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False) return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
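For the (4, 4, 4, 4) input from get_inputs(), the forward pass reshapes to a 16-token sequence, prepends the mean token, and adds the positional embedding; this is where xnumel = 272 (= 17 tokens x 4 batch x 4 channels) in triton_poi_fused_add_cat_1 and rnumel = 17 in the softmax kernel come from. A shape-only sketch (values are arbitrary):

import torch

x = torch.rand(4, 4, 4, 4)
n, c, h, w = x.shape
seq = x.reshape(n, c, h * w).permute(2, 0, 1)                 # (16, 4, 4)
seq = torch.cat([seq.mean(dim=0, keepdim=True), seq], dim=0)  # (17, 4, 4)
pos = torch.randn(h * w + 1, c) / c ** 0.5                    # (17, 4)
seq = seq + pos[:, None, :]                                   # broadcast over batch
assert seq.shape == (17, 4, 4) and seq.numel() == 272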
GDeconv1DBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/sz/cszryrj5kmfztyrg6r3zq6t6rqkltip5llc24nn3ozvittnostjb.py # Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # h => convolution # h_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4], [0], [1], True, [0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,), padding=(0,), dilation=(1,), transposed=True, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten._prelu_kernel] stream0 = get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf2, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class GDeconv1DBlock(nn.Module): def __init__(self, ninp, fmaps, kwidth, stride=4, bias=True, norm_type= None, act=None): super().__init__() pad = max(0, (stride - kwidth) // -2) self.deconv = nn.ConvTranspose1d(ninp, fmaps, kwidth, stride=stride, padding=pad) self.norm = build_norm_layer(norm_type, self.deconv, fmaps) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.kwidth = kwidth self.stride = stride def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, x): h = self.deconv(x) if self.kwidth % 2 != 0: h = h[:, :, :-1] h = self.forward_norm(h, self.norm) h = self.act(h) return h def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'fmaps': 4, 'kwidth': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,), padding=(0,), dilation=(1,), transposed=True, output_padding=(0 ,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16), (64, 16, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class GDeconv1DBlockNew(nn.Module): def __init__(self, ninp, fmaps, kwidth, stride=4, bias=True, norm_type= None, act=None): super().__init__() pad = max(0, (stride - kwidth) // -2) self.deconv = nn.ConvTranspose1d(ninp, fmaps, kwidth, stride=stride, padding=pad) self.norm = build_norm_layer(norm_type, self.deconv, fmaps) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.kwidth = kwidth self.stride = stride def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, input_0): primals_1 = self.deconv.weight primals_2 = self.deconv.bias primals_4 = self.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
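The fused kernel above is the bias add followed by PReLU: tmp7 = where(x > 0, x, a * x), with x1 = (xindex // 16) % 4 selecting the per-channel slope for the (4, 4, 16) output. An eager check of that identity (shapes match the record; values are random):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 16)   # (N, C, L) after the bias add
a = torch.rand(4)           # one slope per channel, as in nn.PReLU(fmaps)
manual = torch.where(x > 0, x, a[None, :, None] * x)
assert torch.allclose(manual, F.prelu(x, a))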
BaiYuhaoSpiceeYJ/SEGAN_denoise
GDeconv1DBlock
false
2,016
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
import torch import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class Model(nn.Module): def __init__(self, ninp, fmaps, kwidth, stride=4, bias=True, norm_type= None, act=None): super().__init__() pad = max(0, (stride - kwidth) // -2) self.deconv = nn.ConvTranspose1d(ninp, fmaps, kwidth, stride=stride, padding=pad) self.norm = build_norm_layer(norm_type, self.deconv, fmaps) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.kwidth = kwidth self.stride = stride def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, x): h = self.deconv(x) if self.kwidth % 2 != 0: h = h[:, :, :-1] h = self.forward_norm(h, self.norm) h = self.act(h) return h def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
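A minimal shape-check sketch for this record's deconvolution (my own cross-check, assuming only stock PyTorch on CPU): with kwidth == stride == 4 the padding max(0, (stride - kwidth) // -2) is 0, kwidth is even so the h[:, :, :-1] trim never fires, and nn.ConvTranspose1d maps length L to (L - 1) * stride - 2 * pad + kwidth, which is why call() asserts a (4, 4, 16) buffer for the (4, 4, 4) input from get_inputs().

import torch
import torch.nn as nn

# Sketch: reproduce the output-length arithmetic of the deconv in this record.
# pad = max(0, (stride - kwidth) // -2) = 0 when kwidth == stride == 4.
deconv = nn.ConvTranspose1d(4, 4, 4, stride=4, padding=0)
x = torch.rand(4, 4, 4)
y = deconv(x)
# L_out = (L_in - 1) * stride - 2 * pad + kwidth = (4 - 1) * 4 - 0 + 4 = 16
assert y.shape == (4, 4, 16)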
GLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/si/csi3q2lvu3kn5mel4tdzl2vtzommba4fd5qu3gyxcskc5f7bdxxu.py # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%slice_4,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_2, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 
+ (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel class GLU(nn.Module): def __init__(self): super(GLU, self).__init__() def forward(self, x): nc = x.size(1) assert nc % 2 == 0, 'channels dont divide 2!' nc = int(nc / 2) return x[:, :nc] * torch.sigmoid(x[:, nc:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(128)](arg0_1, buf0, 128, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GLUNew(nn.Module): def __init__(self): super(GLUNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
BedirYilmaz/picturate-mwml
GLU
false
2,017
[ "MIT" ]
0
e0dd1bb9df0e0ee5a9cbefba9ac7ada19a2cc41c
https://github.com/BedirYilmaz/picturate-mwml/tree/e0dd1bb9df0e0ee5a9cbefba9ac7ada19a2cc41c
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): nc = x.size(1) assert nc % 2 == 0, 'channels dont divide 2!' nc = int(nc / 2) return x[:, :nc] * torch.sigmoid(x[:, nc:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
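A quick equivalence sketch (an assumption of mine, not part of the record): the slice-and-sigmoid gating in the eager forward() above is the same operation as torch.nn.functional.glu along dim=1, which is what the fused triton_poi_fused_mul_sigmoid_0 kernel computes over the (4, 4, 4, 4) input, halving the channel dimension to (4, 2, 4, 4).

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
nc = x.size(1) // 2
manual = x[:, :nc] * torch.sigmoid(x[:, nc:])  # gating exactly as in the record's forward()
builtin = F.glu(x, dim=1)                      # PyTorch's built-in gated linear unit
assert torch.allclose(manual, builtin, atol=1e-06)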
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/zy/czylxf6rfbnbz2ddgd3xovxwjqnfen7sgqej5mnv46j2fekwnniz.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, 
tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4), (4, 1)) assert_size_stride(primals_2, (2048, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 131072, grid=grid(131072), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn import torch.nn as nn import torch.nn.functional as F class MLP(nn.Module): """ This is just an MLP with 1 hidden layer """ def __init__(self, n_units, dropout=0.1): super(MLP, self).__init__() self.w_1 = nn.Linear(n_units, 2048) self.w_2 = nn.Linear(2048, n_units) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_units': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4), (4, 1)) assert_size_stride(primals_2, (2048,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2048), (32768, 8192, 2048, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(131072)](buf1, primals_2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 2048), (2048, 1), 0), primals_4, buf3 class MLPNew(nn.Module): """ This is just an MLP with 1 hidden layer """ def __init__(self, n_units, dropout=0.1): super(MLPNew, self).__init__() self.w_1 = nn.Linear(n_units, 2048) self.w_2 = nn.Linear(2048, n_units) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.w_1.weight primals_2 = self.w_1.bias primals_4 = self.w_2.weight primals_5 = self.w_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AmineBellahsen/IFT6135_representation_learning
MLP
false
2,018
[ "MIT" ]
0
d93865a2e1d7b42d4808927ce928dc875a436730
https://github.com/AmineBellahsen/IFT6135_representation_learning/tree/d93865a2e1d7b42d4808927ce928dc875a436730
import torch import torch.nn import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """ This is just an MLP with 1 hidden layer """ def __init__(self, n_units, dropout=0.1): super().__init__() self.w_1 = nn.Linear(n_units, 2048) self.w_2 = nn.Linear(2048, n_units) self.dropout = nn.Dropout(dropout) def forward(self, x): return self.w_2(self.dropout(F.relu(self.w_1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
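As a small cross-check sketch (assuming eval-mode dropout, i.e. the identity), the eager path that the compiled call() replaces is just two Linears around a ReLU with a fixed hidden width of 2048; nn.Linear acts on the last dimension only, so the (4, 4, 4, 4) input keeps its shape, matching the reinterpret_tensor round-trip through (64, 2048) in the wrapper.

import torch
import torch.nn.functional as F

w1 = torch.nn.Linear(4, 2048)
w2 = torch.nn.Linear(2048, 4)
x = torch.rand(4, 4, 4, 4)
y = w2(F.relu(w1(x)))  # dropout omitted: it is the identity in eval mode
assert y.shape == x.shape  # Linear maps only the last dim, 4 -> 2048 -> 4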
EncoderImageWeightNormPrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/vx/cvxzmthv4i2niuhjkx7pdwegys74ubmwp36fuzpk743r7lkqg4tm.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/x2/cx2e5rdeusxxcqfojnklfpvernnjnuzqu3eanmgcyclgrncdxyae.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] # Source node to ATen node mapping: # X => div_1 # norm => add # pow_1 => pow_3 # sqrt => sqrt # sum_1 => sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %add), kwargs = {}) triton_poi_fused_add_div_pow_sqrt_sum_1 = async_compile.triton('triton_poi_fused_add_div_pow_sqrt_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sqrt_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] triton_poi_fused_add_div_pow_sqrt_sum_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) return (buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImageWeightNormPrecomp(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecomp, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters. overwritting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecomp, self).load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'img_dim': 4, 'embed_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_pow_sqrt_sum_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4 , (64, 4), (4, 1), 0), buf3 def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class 
EncoderImageWeightNormPrecompNew(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def load_state_dict(self, state_dict): """Copies parameters. overwritting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecompNew, self).load_state_dict(new_state ) def forward(self, input_0): primals_3 = self.fc.bias primals_1 = self.fc.weight_g primals_2 = self.fc.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Ballester/SCAN
EncoderImageWeightNormPrecomp
false
2,019
[ "Apache-2.0" ]
0
4a003f60d3e45e5dd16969745e4b182fe705e758
https://github.com/Ballester/SCAN/tree/4a003f60d3e45e5dd16969745e4b182fe705e758
import torch from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class Model(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super().__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters. overwriting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super().load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
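A brief sketch of the l2norm helper used throughout this record, compared against F.normalize (my own cross-check, not from the source). The two differ only in how eps enters the denominator: l2norm adds it to the norm, while F.normalize clamps the norm from below, which is indistinguishable for non-degenerate inputs.

import torch
import torch.nn.functional as F

X = torch.rand(4, 4, 4, 4)
eps = 1e-08
manual = X / (X.pow(2).sum(dim=-1, keepdim=True).sqrt() + eps)  # l2norm from the record
builtin = F.normalize(X, p=2, dim=-1, eps=eps)  # clamps the norm at eps instead of adding it
assert torch.allclose(manual, builtin, atol=1e-05)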
EncoderImagePrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/5x/c5xuny2q3w4rfyqbc6h5bf6h7phwdd6eg4xkubmoahyiibq64bkx.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] # Source node to ATen node mapping: # X => div # norm => add # pow_1 => pow_1 # sqrt => sqrt # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %add), kwargs = {}) triton_poi_fused_add_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_pow_sqrt_sum_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.init def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImagePrecomp(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImagePrecomp, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = nn.Linear(img_dim, embed_size) self.init_weights() def init_weights(self): """Xavier initialization for the fully connected layer """ r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features) self.fc.weight.data.uniform_(-r, r) self.fc.bias.data.fill_(0) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters. overwritting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImagePrecomp, self).load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'img_dim': 4, 'embed_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sqrt_sum_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class EncoderImagePrecompNew(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImagePrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = nn.Linear(img_dim, embed_size) self.init_weights() def init_weights(self): """Xavier initialization for the fully connected layer """ r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features) self.fc.weight.data.uniform_(-r, r) self.fc.bias.data.fill_(0) def load_state_dict(self, state_dict): """Copies parameters. overwritting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImagePrecompNew, self).load_state_dict(new_state) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Ballester/SCAN
EncoderImagePrecomp
false
2,020
[ "Apache-2.0" ]
0
4a003f60d3e45e5dd16969745e4b182fe705e758
https://github.com/Ballester/SCAN/tree/4a003f60d3e45e5dd16969745e4b182fe705e758
import torch import numpy as np from collections import OrderedDict import torch.nn as nn import torch.nn.init def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class Model(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super().__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = nn.Linear(img_dim, embed_size) self.init_weights() def init_weights(self): """Xavier initialization for the fully connected layer """ r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features) self.fc.weight.data.uniform_(-r, r) self.fc.bias.data.fill_(0) def forward(self, images): """Extract image feature vectors.""" features = self.fc(images) if not self.no_imgnorm: features = l2norm(features, dim=-1) return features def load_state_dict(self, state_dict): """Copies parameters. overwriting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super().load_state_dict(new_state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
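A sketch of the Xavier bound used by init_weights above (assumption: plain gain-1 Xavier): r = sqrt(6 / (fan_in + fan_out)) is exactly the uniform bound that nn.init.xavier_uniform_ applies, so the hand-rolled uniform_(-r, r) and the library initializer draw from the same range.

import math
import torch.nn as nn

fc = nn.Linear(4, 4)
r = math.sqrt(6.0) / math.sqrt(fc.in_features + fc.out_features)  # bound from init_weights
nn.init.xavier_uniform_(fc.weight)  # uniform(-r, r) with gain=1, the same rule
assert fc.weight.abs().max().item() <= r
fc.bias.data.fill_(0)  # bias zeroed as in the record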
SimpleMLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex 
% 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/tz/ctzengiku4fpyacmhgujhvarriu4wwirpgay5u6a5wsrq2v75w32.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = (xindex // 48) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/bv/cbvhs2xxondnsndwd2fomugf22ux53yglufxq4ntkf3shlcw366c.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_2 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = (xindex // 48) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (3, 16), (16, 1)) assert_size_stride(primals_7, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) 
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 1024, grid=grid(1024), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 1024, grid=grid(1024), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 3), (3, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 3), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 192, grid=grid(192), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 3), (48, 12, 3, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 192, grid=grid(192), stream=stream0) del buf5 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(buf3, (64, 16), (16, 1), 0), buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((3, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class SimpleMLP(nn.Module): def __init__(self): super(SimpleMLP, self).__init__() self.l1 = nn.Linear(4, 16) self.l2 = nn.Linear(16, 16) self.l3 = nn.Linear(16, 3) def forward(self, x): x = F.relu(self.l1(x)) x = F.relu(self.l2(x)) x = F.softmax(self.l3(x), dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = xindex // 48 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 12 x2 = xindex // 48 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (3, 16), (16, 1)) assert_size_stride(primals_7, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 
4, 4, 16), (256, 64, 16, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, primals_2, buf8, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 16), (256, 64, 16, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf3, primals_5, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 3), (3, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 3), (1, 16), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32) triton_poi_fused__softmax_1[grid(192)](buf4, buf5, 192, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 3), (48, 12, 3, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(192)](buf5, buf6, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf5 return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor( buf3, (64, 16), (16, 1), 0), buf6, primals_6, buf7, primals_4, buf8 class SimpleMLPNew(nn.Module): def __init__(self): super(SimpleMLPNew, self).__init__() self.l1 = nn.Linear(4, 16) self.l2 = nn.Linear(16, 16) self.l3 = nn.Linear(16, 3) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Ali-ry/azureml-examples
SimpleMLP
false
2021
[ "MIT" ]
0
817ae89d2766dcafd70937a22cb3a80f100a2906
https://github.com/Ali-ry/azureml-examples/tree/817ae89d2766dcafd70937a22cb3a80f100a2906
import torch import torch.nn as nn import torch.nn.functional as F import torch.onnx class Model(nn.Module): def __init__(self): super().__init__() self.l1 = nn.Linear(4, 16) self.l2 = nn.Linear(16, 16) self.l3 = nn.Linear(16, 3) def forward(self, x): x = F.relu(self.l1(x)) x = F.relu(self.l2(x)) x = F.softmax(self.l3(x), dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
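The two _softmax kernels in the SimpleMLP record above implement the standard numerically stable two-pass softmax over dim 1: the first kernel subtracts the per-slice maximum (the four loads at offsets 0, 12, 24, 36 walk dim 1 of the (4, 4, 4, 3) output) and exponentiates, and the second divides by the sum of those exponentials. A minimal PyTorch sketch of that decomposition, with logits as an illustrative stand-in for the addmm output buffer:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 4, 4, 3)

# Pass 1 (triton_poi_fused__softmax_1): shift by the max over dim 1, then exp.
numer = (logits - logits.amax(dim=1, keepdim=True)).exp()

# Pass 2 (triton_poi_fused__softmax_2): normalize by the sum over dim 1.
probs = numer / numer.sum(dim=1, keepdim=True)

assert torch.allclose(probs, F.softmax(logits, dim=1), atol=1e-6)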
TVLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/2k/c2k5vlnjaakkj3mchzvvpgyipewojru5n4js4efv5s4x5d74k7ro.py # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, truediv, sub_1, pow_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.mul] # Source node to ATen node mapping: # add => add # h_tv => sum_1 # mul => mul # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # sub_1 => sub_1 # truediv => div # truediv_1 => div_1 # truediv_2 => div_2 # w_tv => sum_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 48), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 48), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {}) triton_per_fused_add_div_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( 
size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex % 12 r1 = (rindex // 12) r2 = rindex % 3 r3 = (rindex // 3) tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 0.020833333333333332 tmp17 = tmp7 * tmp16 tmp18 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tmp20 = 2.0 tmp21 = tmp19 * tmp20 tmp22 = 0.25 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, truediv, sub_1, pow_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_div_mul_pow_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel class TVLoss(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLoss, self).__init__() self.tv_loss_weight = tv_loss_weight def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = self.tensor_size(x[:, :, :, 1:]) h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum() w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum() return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w ) / batch_size @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 12 r1 = rindex // 12 r2 = rindex % 3 r3 = rindex // 3 tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 0.020833333333333332 tmp17 = tmp7 * tmp16 tmp18 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tmp20 = 2.0 tmp21 = tmp19 * tmp20 tmp22 = 0.25 tmp23 = tmp21 * tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class TVLossNew(nn.Module): def __init__(self, tv_loss_weight=1): super(TVLossNew, self).__init__() self.tv_loss_weight = tv_loss_weight @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Blatts01/VckImageRestoration
TVLoss
false
2022
[ "MIT" ]
0
ae4e2221d9d4e236a08722cb92ac5cc88947e311
https://github.com/Blatts01/VckImageRestoration/tree/ae4e2221d9d4e236a08722cb92ac5cc88947e311
import torch import torch.nn as nn import torch.nn.parallel class Model(nn.Module): def __init__(self, tv_loss_weight=1): super().__init__() self.tv_loss_weight = tv_loss_weight def forward(self, x): batch_size = x.size()[0] h_x = x.size()[2] w_x = x.size()[3] count_h = self.tensor_size(x[:, :, 1:, :]) count_w = self.tensor_size(x[:, :, :, 1:]) h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum() w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum() return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w ) / batch_size @staticmethod def tensor_size(t): return t.size()[1] * t.size()[2] * t.size()[3] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
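The TVLoss record above fuses the entire loss into a single persistent reduction. For the (4, 4, 4, 4) test input, count_h and count_w both equal 4 * 3 * 4 = 48, which is why one constant, 0.020833333333333332 = 1/48, scales both partial sums before the final * 2.0 (the doubled tv_loss_weight) and * 0.25 (1/batch_size). A sketch checking the baked-in constants against the reference formula (the tensor name x is illustrative):

import torch

x = torch.rand(4, 4, 4, 4)
h_tv = (x[:, :, 1:, :] - x[:, :, :-1, :]).pow(2).sum()
w_tv = (x[:, :, :, 1:] - x[:, :, :, :-1]).pow(2).sum()

# Constants as fused by the kernel: 1/48 per sum, then * 2.0, then * 0.25.
fused = (h_tv * 0.020833333333333332 + w_tv * 0.020833333333333332) * 2.0 * 0.25
reference = 1 * 2 * (h_tv / 48 + w_tv / 48) / 4

assert torch.allclose(fused, reference)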
Linear3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/64/c64ahxnpt5ixqrlolbug3qf6y4u2zqmcjekif2yu4ba4hcze2fom.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] # Source node to ATen node mapping: # output => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ri/cripmqky3lo3skozwggzdw7u7ybokgg2w2o3vcesam3ywnqu73dg.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] # Source node to ATen node mapping: # output_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %unsqueeze), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 
'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_2, buf1, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf3, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch as th from torch.nn import Parameter def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3D(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3D, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj_matrix=None, permutation_matrix=None): input_ = [input] if input.dim() == 2: if permutation_matrix is not None: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, permutation_matrix.shape[1]])) elif hasattr(self, 'noise'): input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features - 1])) else: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features])) if adj_matrix is not None and permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ (adj_matrix.t(). unsqueeze(2) * permutation_matrix)).transpose(0, 1)) elif adj_matrix is not None: input_.append(input_[-1] * adj_matrix.t().unsqueeze(0)) elif permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ permutation_matrix).t() ) if hasattr(self, 'noise'): self.noise.normal_() input_.append(th.cat([input_[-1], self.noise], 2)) return functional_linear3d(input_[-1], self.weight, self.bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch as th from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_2, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_2[grid(256)](buf3, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0) def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3DNew(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. 
Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3DNew, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
BadrYoubiIdrissi/CausalDiscoveryToolbox
Linear3D
false
2023
[ "MIT" ]
0
1e729d002a64ea1942caecd21b9dc8cc217ea0e2
https://github.com/BadrYoubiIdrissi/CausalDiscoveryToolbox/tree/1e729d002a64ea1942caecd21b9dc8cc217ea0e2
import math import torch import torch as th from torch.nn import Parameter def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Model(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super().__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj_matrix=None, permutation_matrix=None): input_ = [input] if input.dim() == 2: if permutation_matrix is not None: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, permutation_matrix.shape[1]])) elif hasattr(self, 'noise'): input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features - 1])) else: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features])) if adj_matrix is not None and permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ (adj_matrix.t(). unsqueeze(2) * permutation_matrix)).transpose(0, 1)) elif adj_matrix is not None: input_.append(input_[-1] * adj_matrix.t().unsqueeze(0)) elif permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ permutation_matrix).t() ) if hasattr(self, 'noise'): self.noise.normal_() input_.append(th.cat([input_[-1], self.noise], 2)) return functional_linear3d(input_[-1], self.weight, self.bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = sel # ... truncated (>4000 chars) for memory efficiency
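The Linear3D record above lowers functional_linear3d on the 4D test input to two clone kernels (one materializing the (0, 1) transpose of the input, one broadcasting the weight across the leading dimension), a single bmm over the 16 flattened batch matrices, and a broadcast bias add. A sketch of that lowering with shapes fixed to the record's inputs; the variable names are illustrative:

import torch

x = torch.rand(4, 4, 4, 4)
weight = torch.rand(4, 4, 4)  # (channels, in_features, out_features)
bias = torch.rand(4, 4)       # (channels, out_features)

xt = x.transpose(0, 1).contiguous()                      # triton_poi_fused_clone_0
w = weight.unsqueeze(0).expand(4, 4, 4, 4).contiguous()  # triton_poi_fused_clone_1
out = torch.bmm(xt.reshape(16, 4, 4), w.reshape(16, 4, 4)).reshape(4, 4, 4, 4)
out = (out + bias.unsqueeze(1)).transpose(0, 1)          # triton_poi_fused_add_2

ref = (x.transpose(0, 1).matmul(weight) + bias.unsqueeze(1)).transpose(0, 1)
assert torch.allclose(out, ref, atol=1e-6)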
FullyConnected
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch.nn as nn def _init_weights(layer): """ Init weights of the layer :param layer: :return: """ nn.init.xavier_uniform_(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) class FullyConnected(nn.Module): def __init__(self, in_features, out_features, activation_fn=nn. functional.relu): super().__init__() self.fc = nn.Linear(in_features, out_features) _init_weights(self.fc) self.activation = activation_fn def forward(self, input): out = self.fc(input) if self.activation is not None: out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 def _init_weights(layer): """ Init weights of the layer :param layer: :return: """ nn.init.xavier_uniform_(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) class FullyConnectedNew(nn.Module): def __init__(self, in_features, out_features, activation_fn=nn. functional.relu): super().__init__() self.fc = nn.Linear(in_features, out_features) _init_weights(self.fc) self.activation = activation_fn def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AntoBcc/benchmarking-gnns
FullyConnected
false
2024
[ "MIT" ]
0
c5750054b2f4ba0822f203fa18d382f6a3b16542
https://github.com/AntoBcc/benchmarking-gnns/tree/c5750054b2f4ba0822f203fa18d382f6a3b16542
import torch import torch.utils.data import torch.nn as nn def _init_weights(layer): """ Init weights of the layer :param layer: :return: """ nn.init.xavier_uniform_(layer.weight) if layer.bias is not None: nn.init.zeros_(layer.bias) class Model(nn.Module): def __init__(self, in_features, out_features, activation_fn=nn. functional.relu): super().__init__() self.fc = nn.Linear(in_features, out_features) _init_weights(self.fc) self.activation = activation_fn def forward(self, input): out = self.fc(input) if self.activation is not None: out = self.activation(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
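The FullyConnected record shows the same relu + threshold_backward fusion as SimpleMLP: one kernel adds the bias, applies ReLU in place, and writes the boolean mask relu(x) <= 0 (buf2) that the backward pass uses to zero incoming gradients. A small sketch of the mask semantics; the buffer names here are illustrative stand-ins:

import torch

pre_bias = torch.randn(64, 4)      # stand-in for the mm output buffer
bias = torch.randn(4)

act = torch.relu(pre_bias + bias)  # the kernel stores this back in place
mask = act <= 0                    # the saved bool buffer

# threshold_backward zeroes the gradient wherever the mask is set:
grad_out = torch.randn(64, 4)
grad_in = torch.where(mask, torch.zeros_like(grad_out), grad_out)
assert torch.equal(grad_in, grad_out * (act > 0))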
ResARModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/gz/cgzofc7tvcvkcukukuhjpyiitvdzbn6lf3hzra33w5otbn2tai4k.py # Topologically Sorted Source Nodes: [x_p], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x_p => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [3, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = (xindex // 7) x2 = xindex tmp0 = (-3) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + ((-3) + x0 + (4*x1)), tmp2 
& xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/jg/cjgj2szfnucys5wl2t33yu3rcaeeit4ocb5igrvtu4fnq32jux3v.py # Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # h => convolution # h_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %squeeze), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_1 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/4e/c4ebwzkf44uozev65qnsvfdzekr6w6lgrhmc3y4is4es73btjzdk.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.add] # Source node to ATen node mapping: # y => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze_1), kwargs = {}) triton_poi_fused_add_2 = 
async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/x3/cx3gzgdgx6jox7zudrijyyoykp2qgqojqifh2yoedw5bx55f3564.py # Topologically Sorted Source Nodes: [sh], Original ATen: [aten.convolution] # Source node to ATen node mapping: # sh => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_7, %primals_8, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 7), (7, 1), torch.float32) # Topologically Sorted Source Nodes: [x_p], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 28, grid=grid(28), stream=stream0) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 7), (0, 7, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_1.run(buf2, primals_3, primals_4, buf3, 16, grid=grid(16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4), (0, 4, 1), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 4), (16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf5, primals_1, primals_6, 16, grid=grid(16), stream=stream0) del primals_1 del primals_6 # Topologically Sorted Source Nodes: [sh], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4), (0, 4, 1), 0), primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf6, (1, 4, 4), (16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [sh], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf7, primals_8, 16, grid=grid(16), stream=stream0) del primals_8 return (buf5, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), primals_2, primals_4, primals_5, primals_7, reinterpret_tensor(buf0, (1, 4, 7), (28, 7, 1), 0), 
buf2, reinterpret_tensor(buf3, (1, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.spectral_norm import spectral_norm def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class ResARModule(nn.Module): def __init__(self, ninp, fmaps, res_fmaps, kwidth, dilation, bias=True, norm_type=None, act=None): super().__init__() self.dil_conv = nn.Conv1d(ninp, fmaps, kwidth, dilation=dilation, bias=bias) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.dil_norm = build_norm_layer(norm_type, self.dil_conv, fmaps) self.kwidth = kwidth self.dilation = dilation self.conv_1x1_skip = nn.Conv1d(fmaps, ninp, 1, bias=bias) self.conv_1x1_skip_norm = build_norm_layer(norm_type, self. conv_1x1_skip, ninp) self.conv_1x1_res = nn.Conv1d(fmaps, res_fmaps, 1, bias=bias) self.conv_1x1_res_norm = build_norm_layer(norm_type, self. conv_1x1_res, res_fmaps) def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, x): kw__1 = self.kwidth - 1 P = kw__1 + kw__1 * (self.dilation - 1) x_p = F.pad(x, (P, 0)) h = self.dil_conv(x_p) h = self.forward_norm(h, self.dil_norm) h = self.act(h) a = h h = self.conv_1x1_skip(h) h = self.forward_norm(h, self.conv_1x1_skip_norm) y = x + h sh = self.conv_1x1_res(a) sh = self.forward_norm(sh, self.conv_1x1_res_norm) return y, sh def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'fmaps': 4, 'res_fmaps': 4, 'kwidth': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.nn.utils.spectral_norm import spectral_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 7 x1 = xindex // 7 x2 = xindex tmp0 = -3 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1), tmp2 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 7), (7, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(28)](primals_1, buf0, 28, XBLOCK=32, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 7 ), (0, 7, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
triton_poi_fused__prelu_kernel_convolution_1[grid(16)](buf2, primals_3, primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4 ), (0, 4, 1), 0), primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 4), (16, 4, 1)) buf5 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused_add_2[grid(16)](buf5, primals_1, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_6 buf6 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 4, 4 ), (0, 4, 1), 0), primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf6, (1, 4, 4), (16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_3[grid(16)](buf7, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 return buf5, reinterpret_tensor(buf7, (4, 4), (4, 1), 0 ), primals_2, primals_4, primals_5, primals_7, reinterpret_tensor(buf0, (1, 4, 7), (28, 7, 1), 0), buf2, reinterpret_tensor(buf3, (1, 4, 4), (16, 4, 1), 0) def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class ResARModuleNew(nn.Module): def __init__(self, ninp, fmaps, res_fmaps, kwidth, dilation, bias=True, norm_type=None, act=None): super().__init__() self.dil_conv = nn.Conv1d(ninp, fmaps, kwidth, dilation=dilation, bias=bias) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.dil_norm = build_norm_layer(norm_type, self.dil_conv, fmaps) self.kwidth = kwidth self.dilation = dilation self.conv_1x1_skip = nn.Conv1d(fmaps, ninp, 1, bias=bias) self.conv_1x1_skip_norm = build_norm_layer(norm_type, self. conv_1x1_skip, ninp) self.conv_1x1_res = nn.Conv1d(fmaps, res_fmaps, 1, bias=bias) self.conv_1x1_res_norm = build_norm_layer(norm_type, self. conv_1x1_res, res_fmaps) def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, input_0): primals_2 = self.dil_conv.weight primals_3 = self.dil_conv.bias primals_4 = self.act.weight primals_5 = self.conv_1x1_skip.weight primals_6 = self.conv_1x1_skip.bias primals_7 = self.conv_1x1_res.weight primals_8 = self.conv_1x1_res.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
BaiYuhaoSpiceeYJ/SEGAN_denoise
ResARModule
false
2025
[ "MIT" ]
0
5bf65ae72b9f0a996ae338c53c68c4967e08cd59
https://github.com/BaiYuhaoSpiceeYJ/SEGAN_denoise/tree/5bf65ae72b9f0a996ae338c53c68c4967e08cd59
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.spectral_norm import spectral_norm def build_norm_layer(norm_type, param=None, num_feats=None): if norm_type == 'bnorm': return nn.BatchNorm1d(num_feats) elif norm_type == 'snorm': spectral_norm(param) return None elif norm_type is None: return None else: raise TypeError('Unrecognized norm type: ', norm_type) class Model(nn.Module): def __init__(self, ninp, fmaps, res_fmaps, kwidth, dilation, bias=True, norm_type=None, act=None): super().__init__() self.dil_conv = nn.Conv1d(ninp, fmaps, kwidth, dilation=dilation, bias=bias) if act is not None: self.act = getattr(nn, act)() else: self.act = nn.PReLU(fmaps, init=0) self.dil_norm = build_norm_layer(norm_type, self.dil_conv, fmaps) self.kwidth = kwidth self.dilation = dilation self.conv_1x1_skip = nn.Conv1d(fmaps, ninp, 1, bias=bias) self.conv_1x1_skip_norm = build_norm_layer(norm_type, self. conv_1x1_skip, ninp) self.conv_1x1_res = nn.Conv1d(fmaps, res_fmaps, 1, bias=bias) self.conv_1x1_res_norm = build_norm_layer(norm_type, self. conv_1x1_res, res_fmaps) def forward_norm(self, x, norm_layer): if norm_layer is not None: return norm_layer(x) else: return x def forward(self, x): kw__1 = self.kwidth - 1 P = kw__1 + kw__1 * (self.dilation - 1) x_p = F.pad(x, (P, 0)) h = self.dil_conv(x_p) h = self.forward_norm(h, self.dil_norm) h = self.act(h) a = h h = self.conv_1x1_skip(h) h = self.forward_norm(h, self.conv_1x1_skip_norm) y = x + h sh = self.conv_1x1_res(a) sh = self.forward_norm(sh, self.conv_1x1_res_norm) return y, sh def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'ninp': 4, 'fmaps': 4, 'res_fmaps': 4, 'kwidth': 4, 'dilation': 1}]
CharbonnierLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ev/cevrgxdxpmu3sxdi2mxonyf62b6i4l37kax3snq4l7gm2ajbiknc.py # Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.mean] # Source node to ATen node mapping: # add => add # diff => sub # loss => mean # mul => mul # sqrt => sqrt # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {}) triton_per_fused_add_mean_mul_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1e-06 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_sqrt_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel class CharbonnierLoss(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=0.001): super(CharbonnierLoss, self).__init__() self.eps = eps def forward(self, x, y): diff = x - y loss = torch.mean(torch.sqrt(diff * diff + self.eps * self.eps)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1e-06 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_mul_sqrt_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class CharbonnierLossNew(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=0.001): super(CharbonnierLossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Blatts01/VckImageRestoration
CharbonnierLoss
false
2026
[ "MIT" ]
0
ae4e2221d9d4e236a08722cb92ac5cc88947e311
https://github.com/Blatts01/VckImageRestoration/tree/ae4e2221d9d4e236a08722cb92ac5cc88947e311
import torch import torch.nn as nn import torch.nn.parallel class Model(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=0.001): super().__init__() self.eps = eps def forward(self, x, y): diff = x - y loss = torch.mean(torch.sqrt(diff * diff + self.eps * self.eps)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
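Annotation (not part of the record): the fused kernel's literal 1e-06 is eps ** 2 for the default eps = 0.001, which is why the eager and compiled paths agree. Under that assumption, the sketch below also shows the Charbonnier loss is a smoothed L1: per element, sqrt(d*d + eps*eps) lies between |d| and |d| + eps, so the gap from plain L1 is bounded by eps.

import torch

def charbonnier(x, y, eps=0.001):
    # Same computation as the record's forward(); eps*eps == 1e-06 by default.
    diff = x - y
    return torch.mean(torch.sqrt(diff * diff + eps * eps))

x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
loss = charbonnier(x, y)

# The loss upper-bounds plain L1 by at most eps, since
# |d| <= sqrt(d*d + eps*eps) <= |d| + eps for every element.
l1 = torch.mean((x - y).abs())
assert 0.0 <= (loss - l1).item() <= 0.001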
SepConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3d/c3dtqdllikpvc42pwsvkftosqn6pxptg2cxijjdh2n5bwqbqv7y6.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original 
ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel class SepConv2d(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, act_layer=nn.ReLU): super(SepConv2d, self).__init__() self.depthwise = torch.nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels) self.pointwise = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1) self.act_layer = act_layer() if act_layer is not None else nn.Identity( ) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride def forward(self, x): x = self.depthwise(x) x = self.act_layer(x) x = self.pointwise(x) return x def flops(self, H, W): flops = 0 flops += (H * W * self.in_channels * self.kernel_size ** 2 / self. stride ** 2) flops += H * W * self.in_channels * self.out_channels return flops def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class SepConv2dNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, act_layer=nn.ReLU): super(SepConv2dNew, self).__init__() self.depthwise = torch.nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels) self.pointwise = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1) self.act_layer = act_layer() if act_layer is not None else nn.Identity( ) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride def flops(self, H, W): flops = 0 flops += (H * W * self.in_channels * self.kernel_size ** 2 / self. stride ** 2) flops += H * W * self.in_channels * self.out_channels return flops def forward(self, input_0): primals_1 = self.depthwise.weight primals_2 = self.depthwise.bias primals_4 = self.pointwise.weight primals_5 = self.pointwise.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Blatts01/VckImageRestoration
SepConv2d
false
2027
[ "MIT" ]
0
ae4e2221d9d4e236a08722cb92ac5cc88947e311
https://github.com/Blatts01/VckImageRestoration/tree/ae4e2221d9d4e236a08722cb92ac5cc88947e311
import torch import torch.nn as nn import torch.nn.parallel class Model(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, act_layer=nn.ReLU): super().__init__() self.depthwise = torch.nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels) self.pointwise = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1) self.act_layer = act_layer() if act_layer is not None else nn.Identity( ) self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride def forward(self, x): x = self.depthwise(x) x = self.act_layer(x) x = self.pointwise(x) return x def flops(self, H, W): flops = 0 flops += (H * W * self.in_channels * self.kernel_size ** 2 / self. stride ** 2) flops += H * W * self.in_channels * self.out_channels return flops def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
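Annotation (not part of the record): SepConv2d above factors a dense convolution into a depthwise pass (one k x k filter per input channel, groups=in_channels) followed by a 1 x 1 pointwise pass that mixes channels; these are the two terms its flops() method prices separately. A hypothetical parameter count makes the saving concrete for the record's sizes (in=out=4, k=4); the modules are freshly constructed and are not the record's layers.

import torch.nn as nn

cin, cout, k = 4, 4, 4
depthwise = nn.Conv2d(cin, cin, k, groups=cin)  # one k*k filter per channel
pointwise = nn.Conv2d(cin, cout, 1)             # 1x1 conv mixes across channels
dense = nn.Conv2d(cin, cout, k)                 # the unfactored baseline

def n_params(m):
    return sum(p.numel() for p in m.parameters())

sep = n_params(depthwise) + n_params(pointwise)
print(sep, n_params(dense))  # 88 vs 260 weights+biases at these sizes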
CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/nx/cnxws3q66oexgt7cc7qipch6axzrogfuqs62jltv7afv7kdhibh7.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 20 x0 = xindex % 3600 x4 = (xindex // 3600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ar/carimle5mqx7xfbvwnn44w6mirx7wgqcjxiw32pqiiybbxmwtswv.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets, getitem_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [3, 3], [3, 3], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = (xindex // 20) % 20 x2 = (xindex // 400) x5 = xindex x4 = (xindex // 8000) x6 = xindex % 8000 tmp0 = tl.load(in_ptr0 + ((3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (60 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (61 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (62 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp11 = 
tl.load(in_ptr0 + (120 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (121 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (122 + (3*x0) + (180*x1) + (3616*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + (x5), tmp16, xmask) tl.store(out_ptr1 + (x6 + (8064*x4)), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/yi/cyin4ihdolqlj45lkcjp4sx4eyv33tfs5j2qljx4g5jvtfmbgw25.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %cat_default : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %full_default_2],), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 35840 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 1280) x0 = 
xindex % 1280 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 25, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (1280*x1)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 28, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/l5/cl5sjq3cao3ukqmcgpoa2rk5rt5frxu6v3u2awumu7n77uacypek.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%slice_tensor,), kwargs = {}) triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/sp/cspf4xskbjcs6u62siylmohrnlsuhwlvu5pyqxybc4vjyblunr3h.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], 
True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_4 = async_compile.triton('triton_per_fused__log_softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 1280), (1280, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (10, 64), (64, 1)) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 20, 60, 60), 
(72320, 3616, 60, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 288000, grid=grid(288000), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 20, 20, 20), (8000, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 20, 20), (8064, 400, 20, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 32000, grid=grid(32000), stream=stream0) buf4 = empty_strided_cuda((28, 1280), (1280, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf2, buf4, 35840, grid=grid(35840), stream=stream0) buf5 = empty_strided_cuda((28, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1280, 64), (1, 1280), 0), out=buf5) del buf4 buf6 = empty_strided_cuda((25, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_3.run(buf5, primals_5, buf6, 1600, grid=grid(1600), stream=stream0) del buf5 del primals_5 buf7 = empty_strided_cuda((25, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_7 buf10 = empty_strided_cuda((25, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_4.run(buf7, buf10, 25, 10, grid=grid(25), stream=stream0) del buf7 return (buf10, primals_1, primals_3, buf1, buf3, reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), buf6, buf10, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((20, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 1280), (1280, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
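Note on the kernel above: triton_per_fused__log_softmax_4 computes log-softmax in its numerically stable form (subtract the row maximum, exponentiate, sum, log, subtract again), which is exactly the amax/sub/exp/sum/log/sub chain listed in the graph fragment. A minimal identity check in plain PyTorch, separate from the generated module:

import torch

v = torch.randn(25, 10)
shifted = v - v.amax(dim=-1, keepdim=True)                         # amax, sub
stable = shifted - shifted.exp().sum(dim=-1, keepdim=True).log()   # exp, sum, log, sub
assert torch.allclose(stable, torch.log_softmax(v, dim=-1), atol=1e-6)

Subtracting the maximum first keeps every exponent at or below zero, so exp never overflows.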
import torch import torch.nn as nn import torch.nn.functional as F class CNN(nn.Module): """ Convolutional Neural Network. """ def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 8 * 8 * 20) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=-1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
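A note on the shapes in this entry: with a (4, 1, 64, 64) input, conv1 produces (4, 20, 60, 60) and F.max_pool2d(x, 3, 3) produces (4, 20, 20, 20), i.e. 32000 elements, so x.view(-1, 8 * 8 * 20) yields 25 rows of 1280 rather than one row per batch element. That is why the generated kernels use xnumel = 1600 (25 x 64) and why buf7/buf10 are (25, 10); triton_poi_fused_2 additionally zero-fills the matrix from 25 to 28 rows (28 x 1280 = 35840) before the mm. A short shape walk-through in plain PyTorch:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.rand(4, 1, 64, 64)
x = F.relu(nn.Conv2d(1, 20, kernel_size=5, stride=1)(x))  # (4, 20, 60, 60)
x = F.max_pool2d(x, 3, 3)                                 # (4, 20, 20, 20): 32000 elements
x = x.view(-1, 8 * 8 * 20)                                # 32000 / 1280 = 25 rows
print(x.shape)                                            # torch.Size([25, 1280])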
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 288000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 20 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 20 x1 = xindex // 20 % 20 x2 = xindex // 400 x5 = xindex x4 = xindex // 8000 x6 = xindex % 8000 tmp0 = tl.load(in_ptr0 + (3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (60 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (61 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (62 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (120 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (121 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (122 + 3 * x0 + 180 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x5, tmp16, xmask) tl.store(out_ptr1 + (x6 + 8064 
* x4), tmp41, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 35840 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 1280 x0 = xindex % 1280 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 25, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1280 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 28, tl.int64) tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_4(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 25 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (20, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 1280), (1280, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (10, 64), (64, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 60, 60), (72000, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 20, 60, 60), (72320, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(288000)](buf0, primals_2, buf1, 288000, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 20, 20, 20), (8000, 400, 20, 1), torch.float32) buf3 = empty_strided_cuda((4, 20, 20, 20), (8064, 400, 20, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(32000)](buf1, buf2, buf3, 32000, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((28, 1280), (1280, 1), torch.float32) triton_poi_fused_2[grid(35840)](buf2, buf4, 35840, XBLOCK=512, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((28, 64), (64, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1280, 64), ( 1, 1280), 0), out=buf5) del buf4 buf6 = 
empty_strided_cuda((25, 64), (64, 1), torch.float32) triton_poi_fused_relu_3[grid(1600)](buf5, primals_5, buf6, 1600, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_5 buf7 = empty_strided_cuda((25, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf6, reinterpret_tensor(primals_6, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf7) del primals_7 buf10 = empty_strided_cuda((25, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_4[grid(25)](buf7, buf10, 25, 10, XBLOCK=1, num_warps=2, num_stages=1) del buf7 return buf10, primals_1, primals_3, buf1, buf3, reinterpret_tensor(buf2, (25, 1280), (1280, 1), 0), buf6, buf10, primals_6, primals_4 class CNNNew(nn.Module): """ Convolutional Neural Network. """ def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Balandat/Ax
CNN
false
2028
[ "MIT" ]
0
6c7556165291a5329744b5075d5f95d2dec18938
https://github.com/Balandat/Ax/tree/6c7556165291a5329744b5075d5f95d2dec18938
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """ Convolutional Neural Network. """ def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1) self.fc1 = nn.Linear(8 * 8 * 20, 64) self.fc2 = nn.Linear(64, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 3, 3) x = x.view(-1, 8 * 8 * 20) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=-1) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return []
Delta
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ac/cactp4raijeuupgkf72atpb5dqx2m7nq5k7fpzjavsrr2lquv6jf.py # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_1 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view, [None, None, %clamp_max]), kwargs = {}) triton_poi_fused_replication_pad1d_0 = async_compile.triton('triton_poi_fused_replication_pad1d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*(x1 // 16)) + (64*((x1 // 4) % 4)) + ((3) * 
((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ns/cnsqxcdmqckv2b4hbcx2tgslbei3i2eta37z6mobhzwxtt56do2q.py # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] # Source node to ATen node mapping: # arange => add, convert_element_type, iota_1, mul # kernel => repeat # Graph fragment: # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (5,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_1, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -2), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%convert_element_type, [64, 1, 1]), kwargs = {}) triton_poi_fused_arange_repeat_1 = async_compile.triton('triton_poi_fused_arange_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_repeat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = (-2) + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + (x2), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/7o/c7o6lz3z3wc2hctevxaw5a7vifalwi3bccdwtwzi62tq4djyl2kg.py # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] # Source node to ATen node mapping: # specgram_3 => _unsafe_index_1 # Graph fragment: # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_2, [None, None, 
%clamp_max_1]), kwargs = {}) triton_poi_fused_replication_pad1d_2 = async_compile.triton('triton_poi_fused_replication_pad1d_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad1d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + ((3) * ((3) <= (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0))))) + (((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) * ((((0) * ((0) >= ((-2) + x0)) + ((-2) + x0) * (((-2) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/i7/ci7zzwpyjyzmhwbdzexdktbiadv5a4qpgn2zumseiqxejm7orwxo.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %permute_1, %permute_3], -1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = (xindex // 12) x1 = (xindex // 12) % 4 x2 = (xindex // 48) % 4 x3 = (xindex // 192) x5 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + (16*x3) + (64*x2) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tmp16 = tl.full([1], 12, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tl.load(in_ptr2 + ((4*x1) + (16*x3) + (64*x2) + ((-8) + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + (x5), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [specgram_1], Original ATen: [aten.replication_pad1d] stream0 = get_raw_stream(0) triton_poi_fused_replication_pad1d_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [arange, kernel], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf1, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_1, arange, kernel, conv1d], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [specgram_3], Original ATen: [aten.replication_pad1d] triton_poi_fused_replication_pad1d_2.run(buf2, buf3, 512, grid=grid(512), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [arange_1, kernel_1], Original ATen: [aten.arange, aten.repeat] triton_poi_fused_arange_repeat_1.run(buf4, 320, grid=grid(320), stream=stream0) # Topologically Sorted Source Nodes: [specgram_3, arange_1, kernel_1, conv1d_1], Original ATen: [aten.replication_pad1d, aten.arange, aten.repeat, aten.convolution] buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), 
padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(arg0_1, buf2, buf5, buf6, 768, grid=grid(768), stream=stream0) del arg0_1 del buf2 del buf5 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
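The long inlined index expression in triton_poi_fused_replication_pad1d_0 (and its _2 twin) is a branch-free clamp: for output column x0 in [0, 8) it reads input column clamp(x0 - 2, 0, 3), which is replication padding by 2 on each side of a length-4 signal. A small sketch of the identity, in plain Python:

# inner = 0*(0 >= x0-2) + (x0-2)*((x0-2) > 0)  ==  max(0, x0 - 2)
# outer = 3*(3 <= inner) + inner*(inner < 3)   ==  min(inner, 3)
for x0 in range(8):
    inner = max(0, x0 - 2)
    outer = min(inner, 3)
    assert outer == max(0, min(x0 - 2, 3))  # clamp(x0 - 2, 0, 3)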
import torch import torch.nn as nn from torchaudio import transforms class Delta(nn.Module): def __init__(self, order=2, **kwargs): super(Delta, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] for o in range(self.order): feat = feats[-1].transpose(0, 1).unsqueeze(0) delta = self.compute_delta(feat) feats.append(delta.squeeze(0).transpose(0, 1)) x = torch.cat(feats, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
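The constants in the generated code match the standard delta formula d_t = sum_{n=1..N} n * (c_{t+n} - c_{t-n}) / (2 * sum_{n=1..N} n^2): with torchaudio's default win_length=5 we have N = 2, the correlation kernel is [-2, -1, 0, 1, 2] (the -2 + x0 in triton_poi_fused_arange_repeat_1), and the denominator is 2 * (1 + 4) = 10 (the 0.1 scale in triton_poi_fused_replication_pad1d_2). A minimal equivalence sketch, assuming the default win_length:

import torch
import torch.nn.functional as F
from torchaudio import transforms

c = torch.rand(1, 1, 8)                      # (..., freq=1, time=8)
n = 2                                        # (win_length - 1) // 2 for win_length=5
kernel = torch.arange(-n, n + 1, dtype=c.dtype).view(1, 1, -1)
padded = F.pad(c, (n, n), mode='replicate')
manual = F.conv1d(padded, kernel) / 10.0     # conv1d is cross-correlation in PyTorch
assert torch.allclose(manual, transforms.ComputeDeltas()(c), atol=1e-6)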
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchaudio import transforms assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * (x1 // 16) + 64 * (x1 // 4 % 4) + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x2 = xindex tmp0 = -2 + x0 tmp1 = tmp0.to(tl.float32) tl.store(out_ptr0 + x2, tmp1, xmask) @triton.jit def triton_poi_fused_replication_pad1d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))), xmask, eviction_policy='evict_last') tmp1 = 0.1 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x4 = xindex // 12 x1 = xindex // 12 % 4 x2 = xindex // 48 % 4 x3 = xindex // 192 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + 64 * x2 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = 0.1 tmp12 = tmp10 * tmp11 tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp18 = tl.load(in_ptr2 + (4 * x1 + 16 * x3 + 64 * x2 + (-8 + x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp19 = tmp18 * tmp11 tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp15, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp14, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x5, tmp23, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32) 
triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf2, (1, 64, 4), (256, 4, 1)) buf3 = buf0 del buf0 triton_poi_fused_replication_pad1d_2[grid(512)](buf2, buf3, 512, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused_arange_repeat_1[grid(320)](buf4, 320, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf3, buf4, stride=(1,), padding= (0,), dilation=(1,), transposed=False, output_padding=(0,), groups=64, bias=None) assert_size_stride(buf5, (1, 64, 4), (256, 4, 1)) del buf3 del buf4 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch. float32) triton_poi_fused_cat_3[grid(768)](arg0_1, buf2, buf5, buf6, 768, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf2 del buf5 return buf6, class DeltaNew(nn.Module): def __init__(self, order=2, **kwargs): super(DeltaNew, self).__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AyushExel/s3prl
Delta
false
2029
[ "MIT" ]
0
6531904e9621a778978b9cfef3ba9f582e56639a
https://github.com/AyushExel/s3prl/tree/6531904e9621a778978b9cfef3ba9f582e56639a
import torch import torch.nn as nn from torchaudio import transforms class Model(nn.Module): def __init__(self, order=2, **kwargs): super().__init__() self.order = order self.compute_delta = transforms.ComputeDeltas(**kwargs) def forward(self, x): feats = [x] for o in range(self.order): feat = feats[-1].transpose(0, 1).unsqueeze(0) delta = self.compute_delta(feat) feats.append(delta.squeeze(0).transpose(0, 1)) x = torch.cat(feats, dim=-1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
InnerProductDecoder
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/sd/csdpxpt6qkkz7yt7aexzdtstgbnmu45cps6zaio7uh6d6ppkfs2l.py # Topologically Sorted Source Nodes: [getitem_1, getitem_3, mul, value, sigmoid], Original ATen: [aten.index, aten.mul, aten.sum, aten.sigmoid] # Source node to ATen node mapping: # getitem_1 => index # getitem_3 => index_1 # mul => mul # sigmoid => sigmoid # value => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%select]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [%select_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sum_1,), kwargs = {}) triton_poi_fused_index_mul_sigmoid_sum_0 = async_compile.triton('triton_poi_fused_index_mul_sigmoid_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sigmoid_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp7 = tl.load(in_ptr0 + (4 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (4*tmp4), xmask, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert(((0 <= tmp10) & (tmp10 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp10 < 4") tmp12 = tl.load(in_ptr1 + (4*tmp10), xmask, eviction_policy='evict_last') tmp13 = tmp6 * tmp12 tmp14 = tl.load(in_ptr1 + (1 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (1 + (4*tmp10)), xmask, eviction_policy='evict_last') tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tl.load(in_ptr1 + (2 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (2 + (4*tmp10)), xmask, eviction_policy='evict_last') tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl.load(in_ptr1 + (3 + (4*tmp4)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (3 + (4*tmp10)), xmask, eviction_policy='evict_last') tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25.to(tl.float32) tmp27 = tl.sigmoid(tmp26) tl.store(out_ptr1 + (x0), tmp27, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [getitem_1, getitem_3, mul, value, sigmoid], Original ATen: [aten.index, aten.mul, aten.sum, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_index_mul_sigmoid_sum_0.run(arg0_1, arg1_1, buf1, 4, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
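One detail worth noting in triton_poi_fused_index_mul_sigmoid_sum_0: the index tensor is normalized with tl.where(tmp0 < 0, tmp0 + 4, tmp0) and then bounds-checked with tl.device_assert. That reproduces Python/PyTorch negative indexing (i maps to i + N) together with the out-of-bounds guard that eager indexing performs on the host. In eager terms:

import torch

z = torch.arange(4)
i = torch.tensor(-1)
wrapped = torch.where(i < 0, i + 4, i)        # what the kernel computes
assert z[i].item() == z[wrapped].item() == 3  # negative indices wrap by +N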
import torch import torch.utils.data class InnerProductDecoder(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward(self, z, edge_index, sigmoid=True): """Decodes the latent variables :obj:`z` into edge probabilities for the given node-pairs :obj:`edge_index`. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1) return torch.sigmoid(value) if sigmoid else value def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype =torch.int64)] def get_init_inputs(): return [[], {}]
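A tiny worked example of the edgewise decode (illustrative values, not part of the entry): for each column (i, j) of edge_index, the score is sigmoid of the inner product of z_i and z_j, so the decoder is symmetric in i and j.

import torch

z = torch.tensor([[1.0, 0.0], [0.5, 0.5]])
edge_index = torch.tensor([[0, 1], [1, 0]])               # edges 0->1 and 1->0
value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1)  # both equal <z_0, z_1> = 0.5
print(torch.sigmoid(value))                               # tensor([0.6225, 0.6225])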
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_index_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp7 = tl.load(in_ptr0 + (4 + x0), xmask) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + 4 * tmp4, xmask, eviction_policy='evict_last') tmp8 = tmp7 + tmp1 tmp9 = tmp7 < 0 tmp10 = tl.where(tmp9, tmp8, tmp7) tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask, 'index out of bounds: 0 <= tmp10 < 4') tmp12 = tl.load(in_ptr1 + 4 * tmp10, xmask, eviction_policy='evict_last') tmp13 = tmp6 * tmp12 tmp14 = tl.load(in_ptr1 + (1 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr1 + (1 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp16 = tmp14 * tmp15 tmp17 = tmp13 + tmp16 tmp18 = tl.load(in_ptr1 + (2 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr1 + (2 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp20 = tmp18 * tmp19 tmp21 = tmp17 + tmp20 tmp22 = tl.load(in_ptr1 + (3 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (3 + 4 * tmp10), xmask, eviction_policy= 'evict_last') tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tmp25.to(tl.float32) tmp27 = tl.sigmoid(tmp26) tl.store(out_ptr1 + x0, tmp27, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_index_mul_sigmoid_sum_0[grid(4)](arg0_1, arg1_1, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class InnerProductDecoderNew(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CFF-Dream/pytorch_geometric
InnerProductDecoder
false
2030
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import torch import torch.utils.data class Model(torch.nn.Module): """The inner product decoder from the `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_ paper .. math:: \\sigma(\\mathbf{Z}\\mathbf{Z}^{\\top}) where :math:`\\mathbf{Z} \\in \\mathbb{R}^{N \\times d}` denotes the latent space produced by the encoder.""" def forward(self, z, edge_index, sigmoid=True): """Decodes the latent variables :obj:`z` into edge probabilities for the given node-pairs :obj:`edge_index`. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1) return torch.sigmoid(value) if sigmoid else value def forward_all(self, z, sigmoid=True): """Decodes the latent variables :obj:`z` into a probabilistic dense adjacency matrix. Args: z (Tensor): The latent space :math:`\\mathbf{Z}`. sigmoid (bool, optional): If set to :obj:`False`, does not apply the logistic sigmoid function to the output. (default: :obj:`True`) """ adj = torch.matmul(z, z.t()) return torch.sigmoid(adj) if sigmoid else adj def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4, 4], dtype =torch.int64)] def get_init_inputs(): return []
DenseGraphConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3i/c3iibarqokkpdvrjfwijmnifcbse5v3c5vj2oqjzbj6rok54jxf5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/w2/cw2bwqpq3dkexeyqz25khcvdcedkdcrcwpb7zrtd6eayijd5lgez.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] # Source node to ATen node mapping: # out_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %view_6), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 
= empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf4, buf3, primals_5, 256, grid=grid(256), stream=stream0) del buf3 del primals_5 return (buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
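triton_poi_fused_clone_0 (xnumel = 256, reading in_ptr0 + x0 with x0 = xindex % 64) is where the broadcast of the (4, 4, 4) adjacency against the 4-D input is materialized: torch.matmul broadcasts batch dimensions, and inductor writes out the expanded copy so it can hand bmm sixteen contiguous 4x4 matrices. In eager terms:

import torch

adj = torch.rand(4, 4, 4)   # 64 elements, as in primals_2
x = torch.rand(4, 4, 4, 4)  # as in primals_1
out = torch.matmul(adj, x)  # adj's batch dim broadcasts over x's leading dim
print(out.shape)            # torch.Size([4, 4, 4, 4])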
import math import torch from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseGraphConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super(DenseGraphConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = torch.matmul(out, self.weight) if self.aggr == 'mean': out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) elif self.aggr == 'max': out = out.max(dim=-1)[0] out = out + self.lin(x) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
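For the 'add' aggregation exercised here, the whole compiled graph reduces to out = (adj @ x) @ W + lin(x), with the bias addition fused into triton_poi_fused_add_1. A minimal eager sketch of that path on the canonical (B, N, F) shapes (variable names illustrative, not from the entry):

import torch

B, N, F_in, F_out = 4, 4, 4, 4
x = torch.rand(B, N, F_in)
adj = torch.rand(B, N, N)
W = torch.rand(F_in, F_out)
lin = torch.nn.Linear(F_in, F_out)
out = torch.matmul(torch.matmul(adj, x), W) + lin(x)  # aggregate, transform, add root term
print(out.shape)                                      # torch.Size([4, 4, 4])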
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2) del primals_3 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class DenseGraphConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super(DenseGraphConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. 
in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.lin.weight primals_5 = self.lin.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
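# A minimal parity sketch, assuming the eager DenseGraphConv (python_code field
# above) and the compiled DenseGraphConvNew are defined in the same session and
# a CUDA device is available; with aggr='add' and no mask the two paths should
# agree up to float tolerance.
import torch

torch.manual_seed(0)
eager = DenseGraphConv(4, 4).cuda()
compiled = DenseGraphConvNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')   # matches get_inputs() above
adj = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(x, adj), compiled(x, adj), atol=1e-5)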
CFF-Dream/pytorch_geometric
DenseGraphConv
false
2031
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import math import torch from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) class Model(torch.nn.Module): """See :class:`torch_geometric.nn.conv.GraphConv`. """ def __init__(self, in_channels, out_channels, aggr='add', bias=True): assert aggr in ['add', 'mean', 'max'] super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.aggr = aggr self.weight = Parameter(torch.Tensor(in_channels, out_channels)) self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): uniform(self.in_channels, self.weight) self.lin.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = torch.matmul(out, self.weight) if self.aggr == 'mean': out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) elif self.aggr == 'max': out = out.max(dim=-1)[0] out = out + self.lin(x) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
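# For aggr='add', the forward above reduces to out = (adj @ x) @ W + lin(x).
# A minimal numeric sketch of that algebra with hypothetical standalone
# tensors (no module state):
import torch

B, N, F_in, F_out = 2, 3, 4, 4
x = torch.randn(B, N, F_in)
adj = torch.randn(B, N, N)
W = torch.randn(F_in, F_out)        # the module's weight parameter
W_lin = torch.randn(F_out, F_in)    # lin.weight
b = torch.randn(F_out)              # lin.bias

out = torch.matmul(adj, x) @ W + (x @ W_lin.t() + b)
assert out.shape == (B, N, F_out)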
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] # Source node to ATen node mapping: # q => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') 
tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_attn => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] # Source 
node to ATen node mapping: # p_attn => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [k], 
Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [q], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf3, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [k], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf4, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [v], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf8, primals_8, 64, grid=grid(64), stream=stream0) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(buf11, primals_10, 64, grid=grid(64), stream=stream0) del primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, 
), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
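# triton_poi_fused__softmax_1/_2 above implement softmax in two passes: first
# exp(x - rowmax) for numerical stability, then division by the row sum. A
# minimal PyTorch sketch of the same decomposition:
import torch

scores = torch.randn(4, 4, 4, 4)
shifted = scores - scores.amax(dim=-1, keepdim=True)  # kernel 1: subtract row max
numer = shifted.exp()                                 # kernel 1: exponentiate
p_attn = numer / numer.sum(dim=-1, keepdim=True)      # kernel 2: normalize
assert torch.allclose(p_attn, torch.softmax(scores, dim=-1), atol=1e-6)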
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data class MultiHeadAttention(nn.Module): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, block_length=None, proximal_bias= False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores * block_mask + -10000.0 * (1 - block_mask) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. 
emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): 2 * self.window_size + 1 pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, commons .convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ 0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ length, 0]])) x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'out_channels': 4, 'n_heads': 4}]
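# Note: the module above calls commons.convert_pad_shape without importing
# commons, and _get_relative_embeddings opens with the no-op expression
# 2 * self.window_size + 1 (upstream binds it to an unused variable). A minimal
# sketch of the helper as it is defined in the glow-tts commons module, stated
# here as an assumption about the upstream API:
def convert_pad_shape(pad_shape):
    # F.pad takes a flat tuple ordered from the last dimension backwards, so
    # reverse the per-dimension [before, after] pairs and flatten them.
    reversed_pairs = pad_shape[::-1]
    return [item for sublist in reversed_pairs for item in sublist]

# e.g. convert_pad_shape([[0, 0], [0, 0], [0, 1]]) -> [0, 1, 0, 0, 0, 0]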
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn from torch.nn import functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), 
(16, 4, 1)) buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = extern_kernels.convolution(primals_6, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf3, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf4 = buf1 del buf1 triton_poi_fused_convolution_0[grid(64)](buf4, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = buf2 del buf2 triton_poi_fused_convolution_0[grid(64)](buf8, primals_8, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_8 buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = extern_kernels.convolution(reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_0[grid(64)](buf11, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 return (buf11, buf7, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf7, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)) class MultiHeadAttentionNew(nn.Module): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, block_length=None, proximal_bias= False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) 
self.conv_k.bias.data.copy_(self.conv_q.bias.data) nn.init.xavier_uniform_(self.conv_v.weight) def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores * block_mask + -10000.0 * (1 - block_mask) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, value_relative_embeddings) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn def _matmul_with_relative_values(self, x, y): """ x: [b, h, l, m] y: [h or 1, m, d] ret: [b, h, l, d] """ ret = torch.matmul(x, y.unsqueeze(0)) return ret def _matmul_with_relative_keys(self, x, y): """ x: [b, h, l, d] y: [h or 1, m, d] ret: [b, h, l, m] """ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) return ret def _get_relative_embeddings(self, relative_embeddings, length): 2 * self.window_size + 1 pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max(self.window_size + 1 - length, 0) slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad(relative_embeddings, commons .convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) else: padded_relative_embeddings = relative_embeddings used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): """ x: [b, h, l, 2*l-1] ret: [b, h, l, l] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) x_flat = x.view([batch, heads, length * 2 * length]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ 0, length - 1]])) x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:] return x_final def _absolute_position_to_relative_position(self, x): """ x: [b, h, l, l] ret: [b, h, l, 2*l-1] """ batch, heads, length, _ = x.size() x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [ length, 0]])) x_final = 
x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): """Bias for self-attention to encourage attention to close positions. Args: length: an integer scalar. Returns: a Tensor with shape [1, 1, length, length] """ r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff) ), 0), 0) def forward(self, input_0, input_1): primals_1 = self.conv_q.weight primals_2 = self.conv_q.bias primals_4 = self.conv_k.weight primals_5 = self.conv_k.bias primals_7 = self.conv_v.weight primals_8 = self.conv_v.bias primals_9 = self.conv_o.weight primals_10 = self.conv_o.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
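# A minimal parity sketch, assuming the eager MultiHeadAttention (python_code
# field above) and MultiHeadAttentionNew are importable together and CUDA is
# available; with the default p_dropout=0.0 both paths should agree
# numerically.
import torch

torch.manual_seed(0)
eager = MultiHeadAttention(4, 4, 4).cuda().eval()
compiled = MultiHeadAttentionNew(4, 4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, device='cuda')
c = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(x, c), compiled(x, c), atol=1e-5)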
AndreHe02/glow-tts
MultiHeadAttention
false
2032
[ "MIT" ]
0
683f68f17790f2f46c23e9d3eadbcac352d82e2b
https://github.com/AndreHe02/glow-tts/tree/683f68f17790f2f46c23e9d3eadbcac352d82e2b
import math import torch from torch import nn from torch.nn import functional as F import torch.utils.data class Model(nn.Module): def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.0, block_length=None, proximal_bias= False, proximal_init=False): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.window_size = window_size self.heads_share = heads_share self.block_length = block_length self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = nn.Conv1d(channels, channels, 1) self.conv_k = nn.Conv1d(channels, channels, 1) self.conv_v = nn.Conv1d(channels, channels, 1) if window_size is not None: n_heads_rel = 1 if heads_share else n_heads rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.conv_o = nn.Conv1d(channels, out_channels, 1) self.drop = nn.Dropout(p_dropout) nn.init.xavier_uniform_(self.conv_q.weight) nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = *key.size(), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose( 2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose( 2, 3) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self .k_channels) if self.window_size is not None: assert t_s == t_t, 'Relative attention is only available for self-attention.' key_relative_embeddings = self._get_relative_embeddings(self. emb_rel_k, t_s) rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings) rel_logits = self._relative_position_to_absolute_position( rel_logits) scores_local = rel_logits / math.sqrt(self.k_channels) scores = scores + scores_local if self.proximal_bias: assert t_s == t_t, 'Proximal bias is only available for self-attention.' scores = scores + self._attention_bias_proximal(t_s) if mask is not None: scores = scores.masked_fill(mask == 0, -10000.0) if self.block_length is not None: block_mask = torch.ones_like(scores).triu(-self.block_length ).tril(self.block_length) scores = scores * block_mask + -10000.0 * (1 - block_mask) p_attn = F.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position( p_attn) value_relative_embeddings = self._get_relative_embeddings(self. emb_rel_v, t_s) output = output + self._matmul_with_relative_values( relative_weights, # ... truncated (>4000 chars) for memory efficiency
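# _relative_position_to_absolute_position above uses the standard pad-and-
# reshape "skew" trick to turn [b, h, l, 2l-1] relative logits into
# [b, h, l, l] absolute ones. A self-contained sketch using plain F.pad tuples
# instead of commons.convert_pad_shape:
import torch
import torch.nn.functional as F

def rel_to_abs(x):
    b, h, l, _ = x.size()
    x = F.pad(x, (0, 1))                # [b, h, l, 2l]
    x = x.view(b, h, l * 2 * l)         # flatten the last two dims
    x = F.pad(x, (0, l - 1))            # pad so the reshape below lines up
    x = x.view(b, h, l + 1, 2 * l - 1)
    return x[:, :, :l, l - 1:]          # [b, h, l, l]

x = torch.arange(15.0).view(1, 1, 3, 5)  # l=3, 2l-1=5
assert rel_to_abs(x).shape == (1, 1, 3, 3)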
DenseSAGEConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3i/c3iibarqokkpdvrjfwijmnifcbse5v3c5vj2oqjzbj6rok54jxf5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/xe/cxee5whob4lailfzzftamemmtlwuidgjebrqgd7sk2xvpsoxxfeg.py # Topologically Sorted Source Nodes: [sum_1, clamp, out_1], Original ATen: [aten.sum, aten.clamp, aten.div] # Source node to ATen node mapping: # clamp => clamp_min # out_1 => div # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_2, [-1], True), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_sum_1 = async_compile.triton('triton_poi_fused_clamp_div_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_sum_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(in_out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/hf/chf6nxjcs3kt6pwvtybkbv6wthqalgzsxb3h7sa4vlfg2czleczx.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.add] # Source node to ATen node mapping: # out_5 => add_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %view_8), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [sum_1, clamp, out_1], Original ATen: [aten.sum, aten.clamp, aten.div] triton_poi_fused_clamp_div_sum_1.run(buf2, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf3 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (4, 1), 0), out=buf3) del primals_3 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), 
(4, 1), 0), out=buf4) del primals_4 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf5, buf4, primals_5, 256, grid=grid(256), stream=stream0) del buf4 del primals_5 return (buf5, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), reinterpret_tensor(buf2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
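# triton_poi_fused_clamp_div_sum_1 above fuses the SAGE mean normalization:
# out = (adj @ x) / adj.sum(dim=-1, keepdim=True).clamp(min=1). A minimal
# PyTorch sketch of the fused computation with the record's input shapes:
import torch

adj = torch.rand(4, 4, 4)
x = torch.rand(4, 4, 4, 4)
out = torch.matmul(adj, x)
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)  # degree-normalized mean
assert out.shape == (4, 4, 4, 4)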
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class DenseSAGEConv(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super(DenseSAGEConv, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_rel = Linear(in_channels, out_channels, bias=False) self.lin_root = Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): self.lin_rel.reset_parameters() self.lin_root.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) add_loop (bool, optional): If set to :obj:`False`, the layer will not automatically add self-loops to the adjacency matrices. (default: :obj:`True`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) out = self.lin_rel(out) + self.lin_root(x) if self.normalize: out = F.normalize(out, p=2, dim=-1) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. 
__name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clamp_div_sum_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp0 / tmp9 tl.store(in_out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1) buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_clamp_div_sum_1[grid(256)](buf2, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0) del buf0 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (4, 1), 0), out=buf3) del primals_3 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf4) del 
primals_4 buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused_add_2[grid(256)](buf5, buf4, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_5 return buf5, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0 ), reinterpret_tensor(buf2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class DenseSAGEConvNew(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super(DenseSAGEConvNew, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_rel = Linear(in_channels, out_channels, bias=False) self.lin_root = Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): self.lin_rel.reset_parameters() self.lin_root.reset_parameters() def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) def forward(self, input_0, input_1): primals_3 = self.lin_rel.weight primals_4 = self.lin_root.weight primals_5 = self.lin_root.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0] class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None)
CFF-Dream/pytorch_geometric
DenseSAGEConv
false
2,033
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class Model(torch.nn.Module): """See :class:`torch_geometric.nn.conv.SAGEConv`. """ def __init__(self, in_channels, out_channels, normalize=False, bias=True): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.normalize = normalize self.lin_rel = Linear(in_channels, out_channels, bias=False) self.lin_root = Linear(in_channels, out_channels, bias=bias) self.reset_parameters() def reset_parameters(self): self.lin_rel.reset_parameters() self.lin_root.reset_parameters() def forward(self, x, adj, mask=None): """ Args: x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B \\times N \\times F}`, with batch-size :math:`B`, (maximum) number of nodes :math:`N` for each graph, and feature dimension :math:`F`. adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B \\times N \\times N}`. The adjacency tensor is broadcastable in the batch dimension, resulting in a shared adjacency matrix for the complete batch. mask (BoolTensor, optional): Mask matrix :math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating the valid nodes for each graph. (default: :obj:`None`) """ x = x.unsqueeze(0) if x.dim() == 2 else x adj = adj.unsqueeze(0) if adj.dim() == 2 else adj B, N, _ = adj.size() out = torch.matmul(adj, x) out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1) out = self.lin_rel(out) + self.lin_root(x) if self.normalize: out = F.normalize(out, p=2, dim=-1) if mask is not None: out = out * mask.view(B, N, 1) return out def __repr__(self): return '{}({}, {})'.format(self.__class__.__name__, self. in_channels, self.out_channels) class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super().__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): # ... truncated (>4000 chars) for memory efficiency
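Editor's note on the DenseSAGEConv record above: the broadcast-clone, clamp/div and add kernels, together with the extern bmm/mm calls, implement the same mean aggregation as the eager forward. A minimal eager restatement for orientation (the helper name and the unbatched (F_in, F_out) weight layout are illustrative assumptions, not part of the record):

import torch

def dense_sage_reference(x, adj, w_rel, w_root, bias):
    # x: (B, N, F_in), adj: (B, N, N)
    out = torch.matmul(adj, x)                              # sum over neighbors
    out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)  # mean; degree clamped to >= 1
    return out @ w_rel + (x @ w_root + bias)                # lin_rel(out) + lin_root(x)

x, adj = torch.rand(2, 5, 4), torch.rand(2, 5, 5)
w_rel, w_root, bias = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4)
print(dense_sage_reference(x, adj, w_rel, w_root, bias).shape)  # torch.Size([2, 5, 4])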
ShiftedSoftplus
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ff/cfflcrfbkqug6wu6spd6cdudrcnssg32k3762qutew77yjljfd4b.py # Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub] # Source node to ATen node mapping: # softplus => exp, gt, log1p, where # sub => sub # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, 0.6931471824645996), kwargs = {}) triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.6931471824645996 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.utils.data class ShiftedSoftplus(torch.nn.Module): def __init__(self): super(ShiftedSoftplus, self).__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, x): return F.softplus(x) - self.shift def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tmp6 = 0.6931471824645996 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class ShiftedSoftplusNew(torch.nn.Module): def __init__(self): super(ShiftedSoftplusNew, self).__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CFF-Dream/pytorch_geometric
ShiftedSoftplus
false
2,034
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import torch import torch.nn.functional as F import torch.utils.data class Model(torch.nn.Module): def __init__(self): super().__init__() self.shift = torch.log(torch.tensor(2.0)).item() def forward(self, x): return F.softplus(x) - self.shift def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
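A note on the ShiftedSoftplus kernel above: the literal 0.6931471824645996 is log(2) rounded to float32, and the where(x > 20, x, log1p(exp(x))) sequence is the overflow-safe softplus (20 is PyTorch's default softplus threshold). A minimal equivalence check, written here purely for illustration:

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
shift = torch.log(torch.tensor(2.0)).item()
# Overflow-safe softplus: pass large inputs through unchanged.
stable = torch.where(x > 20.0, x, torch.log1p(torch.exp(x))) - shift
assert torch.allclose(stable, F.softplus(x) - shift, atol=1e-6)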
ResidualLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/e2/ce2g7tigorqocikoyop3snym2gsvx24wihndsskmg2opuqcxo23u.py # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/7n/c7nhgutyhtoc6mh6c4kax3vehfp6poo6edleostzr3iw7c7dh4en.py # Topologically Sorted Source Nodes: [sigmoid_1, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # add => add_2 # mul_1 => mul_1 # sigmoid_1 => sigmoid_1 # Graph fragment: # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_8,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, %sigmoid_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %mul_1), kwargs = {}) triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(buf0, primals_3, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_1, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_1.run(primals_2, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0) return (buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayer(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayer, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, x): return x + self.act(self.lin2(self.act(self.lin1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](buf0, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_2, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf3, primals_3, primals_5, buf0, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % 
groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class ResidualLayerNew(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super(ResidualLayerNew, self).__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, input_0): primals_1 = self.lin1.weight primals_3 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
CFF-Dream/pytorch_geometric
ResidualLayer
false
2,035
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def swish(x): return x * x.sigmoid() def glorot_orthogonal(tensor, scale): if tensor is not None: torch.nn.init.orthogonal_(tensor.data) scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() tensor.data *= scale.sqrt() class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super().__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Model(torch.nn.Module): def __init__(self, hidden_channels, act=swish): super().__init__() self.act = act self.lin1 = Linear(hidden_channels, hidden_channels) self.lin2 = Linear(hidden_channels, hidden_channels) self.reset_parameters() def reset_parameters(self): glorot_orthogonal(self.lin1.weight, scale=2.0) self.lin1.bias.data.fill_(0) glorot_orthogonal(self.lin2.weight, scale=2.0) self.lin2.bias.data.fill_(0) def forward(self, x): return x + self.act(self.lin2(self.act(self.lin1(x)))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
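For orientation on the ResidualLayer record: the first fused kernel computes swish(lin1(x)) (bias add, sigmoid, multiply), and the second folds the residual add into swish(lin2(.)). A compact eager sketch, assuming the groups=1 path where each weight acts as a plain (F, F) matrix (the helper name is illustrative, not from the record):

import torch

def residual_swish(x, w1, b1, w2, b2):
    swish = lambda t: t * t.sigmoid()  # swish(t) = t * sigmoid(t)
    h = swish(x @ w1 + b1)             # mirrors triton_poi_fused_mul_sigmoid_0
    return x + swish(h @ w2 + b2)      # mirrors triton_poi_fused_add_mul_sigmoid_1

x = torch.rand(4, 4, 4, 4)
w, b = torch.rand(4, 4), torch.zeros(4)
print(residual_swish(x, w, b, w, b).shape)  # torch.Size([4, 4, 4, 4])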
Envelope
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/og/coge23wv22hz2aekdz4xkqtdej7ozgwlhpudo45mlboyea3tskh4.py # Topologically Sorted Source Nodes: [truediv, x_pow_p0, mul_1, add, x_pow_p1, mul_2, add_1, mul_3, mul_4, add_2], Original ATen: [aten.reciprocal, aten.mul, aten.pow, aten.add] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # mul_1 => mul_2 # mul_2 => mul_3 # mul_3 => mul_4 # mul_4 => mul_5 # truediv => mul_1, reciprocal # x_pow_p0 => pow_1 # x_pow_p1 => mul # Graph fragment: # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -15.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg0_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 24), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_3), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, -10.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %arg0_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_5), kwargs = {}) triton_poi_fused_add_mul_pow_reciprocal_0 = async_compile.triton('triton_poi_fused_add_mul_pow_reciprocal_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp0 tmp6 = tmp5 * tmp5 tmp7 = -15.0 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tmp10 = tmp6 * tmp0 tmp11 = 24.0 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = -10.0 tmp15 = tmp10 * tmp14 tmp16 = tmp15 * tmp0 tmp17 = tmp13 + tmp16 tl.store(out_ptr0 + (x0), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv, x_pow_p0, mul_1, add, x_pow_p1, mul_2, add_1, mul_3, mul_4, add_2], Original ATen: [aten.reciprocal, aten.mul, aten.pow, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data class Envelope(torch.nn.Module): def __init__(self, exponent): super(Envelope, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, x): p, a, b, c = self.p, self.a, self.b, self.c x_pow_p0 = x.pow(p) x_pow_p1 = x_pow_p0 * x return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'exponent': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp1 / tmp0 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tmp5 = tmp0 * tmp0 tmp6 = tmp5 * tmp5 tmp7 = -15.0 tmp8 = tmp6 * tmp7 tmp9 = tmp4 + tmp8 tmp10 = tmp6 * tmp0 tmp11 = 24.0 tmp12 = tmp10 * tmp11 tmp13 = tmp9 + tmp12 tmp14 = -10.0 tmp15 = tmp10 * tmp14 tmp16 = tmp15 * tmp0 tmp17 = tmp13 + tmp16 tl.store(out_ptr0 + x0, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class EnvelopeNew(torch.nn.Module): def __init__(self, exponent): super(EnvelopeNew, self).__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
CFF-Dream/pytorch_geometric
Envelope
false
2,036
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import torch import torch.utils.data class Model(torch.nn.Module): def __init__(self, exponent): super().__init__() self.p = exponent self.a = -(self.p + 1) * (self.p + 2) / 2 self.b = self.p * (self.p + 2) self.c = -self.p * (self.p + 1) / 2 def forward(self, x): p, a, b, c = self.p, self.a, self.b, self.c x_pow_p0 = x.pow(p) x_pow_p1 = x_pow_p0 * x return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p1 * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
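The constants -15.0, 24.0, and -10.0 baked into the Envelope kernel are the module's coefficients specialized at exponent p = 4 (note that x.pow(4) is strength-reduced to (x * x) * (x * x) in tmp5/tmp6). A one-liner confirming the specialization, for illustration only:

p = 4
a, b, c = -(p + 1) * (p + 2) / 2, p * (p + 2), -p * (p + 1) / 2
assert (a, b, c) == (-15.0, 24, -10.0)  # matches the literals in the kernel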
CuboidPoseHead
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/wy/cwye4gfk4ug6fholrtujqkcyckps5nmpvc74fhtecqjpl343zhyh.py # Topologically Sorted Source Nodes: [x_1, x_2, human_poses], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # human_poses => sum_2 # x_1 => div, exp, sum_1 # x_2 => mul_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %unsqueeze), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {}) triton_poi_fused__softmax_mul_sum_0 = async_compile.triton('triton_poi_fused__softmax_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x5), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2, human_poses], Original ATen: [aten._softmax, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class CuboidPoseHead(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to the `paper <https://arxiv.org/abs/2004.06239>`_ for more details. Args: beta: Constant to adjust the magnification of the soft-maxed heatmap. """ super(CuboidPoseHead, self).__init__() self.beta = beta self.loss = nn.L1Loss() def forward(self, heatmap_volumes, grid_coordinates): """ Args: heatmap_volumes (torch.Tensor(NxKxLxWxH)): 3D human pose heatmaps predicted by the network. grid_coordinates (torch.Tensor(Nx(LxWxH)x3)): Coordinates of the grids in the heatmap volumes. Returns: human_poses (torch.Tensor(NxKx3)): Coordinates of human poses. """ batch_size = heatmap_volumes.size(0) channel = heatmap_volumes.size(1) x = heatmap_volumes.reshape(batch_size, channel, -1, 1) x = F.softmax(self.beta * x, dim=2) grid_coordinates = grid_coordinates.unsqueeze(1) x = torch.mul(x, grid_coordinates) human_poses = torch.sum(x, dim=2) return human_poses def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'beta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tmp2 - tmp2 tmp4 = 4.0 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tmp6 / tmp6 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class CuboidPoseHeadNew(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to the `paper <https://arxiv.org/abs/2004.06239>`_ for more details. Args: beta: Constant to adjust the magnification of the soft-maxed heatmap. """ super(CuboidPoseHeadNew, self).__init__() self.beta = beta self.loss = nn.L1Loss() def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
CuboidPoseHead
false
2,037
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, beta): """Get results from the 3D human pose heatmap. Instead of obtaining maximums on the heatmap, this module regresses the coordinates of keypoints via integral pose regression. Refer to the `paper <https://arxiv.org/abs/2004.06239>`_ for more details. Args: beta: Constant to adjust the magnification of the soft-maxed heatmap. """ super().__init__() self.beta = beta self.loss = nn.L1Loss() def forward(self, heatmap_volumes, grid_coordinates): """ Args: heatmap_volumes (torch.Tensor(NxKxLxWxH)): 3D human pose heatmaps predicted by the network. grid_coordinates (torch.Tensor(Nx(LxWxH)x3)): Coordinates of the grids in the heatmap volumes. Returns: human_poses (torch.Tensor(NxKx3)): Coordinates of human poses. """ batch_size = heatmap_volumes.size(0) channel = heatmap_volumes.size(1) x = heatmap_volumes.reshape(batch_size, channel, -1, 1) x = F.softmax(self.beta * x, dim=2) grid_coordinates = grid_coordinates.unsqueeze(1) x = torch.mul(x, grid_coordinates) human_poses = torch.sum(x, dim=2) return human_poses def get_loss(self, preds, targets, weights): return dict(loss_pose=self.loss(preds * weights, targets * weights)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4]
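One caveat worth noting on this record: with the tiny 4x4 test inputs, heatmap_volumes.reshape(4, 4, -1, 1) makes the softmax axis length 1, so the kernel's tmp7 = tmp6 / tmp6 collapses to 1 and the trace never exercises a real softmax reduction. A general-shape sketch of the integral (soft-argmax) regression the module performs (function name and example shapes are illustrative):

import torch
import torch.nn.functional as F

def soft_argmax(heatmaps, grid, beta=4.0):
    # heatmaps: (N, K, L, W, H); grid: (N, L*W*H, 3)
    n, k = heatmaps.shape[:2]
    w = F.softmax(beta * heatmaps.reshape(n, k, -1, 1), dim=2)  # per-keypoint weights
    return (w * grid.unsqueeze(1)).sum(dim=2)                   # expected coordinates, (N, K, 3)

heatmaps, grid = torch.rand(2, 3, 4, 4, 4), torch.rand(2, 64, 3)
print(soft_argmax(heatmaps, grid).shape)  # torch.Size([2, 3, 3])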
Attention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/xl/cxlpplg3hmt6k4x6alhg4yn6eq5jppxhzrzdcvvcpbupy7pjgudn.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] # Source node to ATen node mapping: # max_1 => max_1 # out => exp # score_1 => div # src_max => clamp_min # sub => sub # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused_clamp_div_exp_max_sub_0 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/fi/cfijnjpiz4ruggqhl6zhj4ujuexfsuzxvpo26muzj4bggik4i5hl.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add # exp_1 => exp_1 # max_1 => max_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/h7/ch7xj7aj6agx6frik7qd7tffe2pmrsjjensiwx2hy2md4kde7aj5.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add # exp_1 => exp_1 # max_1 => max_1 # out_1 => div_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1.run(buf1, buf0, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0) del buf2 buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4) del arg2_1 del buf3 return 
(reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn.functional as F
import torch.utils.data


def restricted_softmax(src, dim=-1, margin=0):
    src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
    out = (src - src_max).exp()
    out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
    return out


class Attention(torch.nn.Module):

    def __init__(self, dropout=0):
        super(Attention, self).__init__()
        self.dropout = dropout

    def forward(self, query, key, value):
        assert query.dim() == key.dim() == value.dim() >= 2
        assert query.size(-1) == key.size(-1)
        assert key.size(-2) == value.size(-2)
        score = torch.matmul(query, key.transpose(-2, -1))
        score = score / math.sqrt(key.size(-1))
        score = restricted_softmax(score, dim=-1)
        score = F.dropout(score, p=self.dropout, training=self.training)
        return torch.matmul(score, value)

    def __repr__(self):
        return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1, buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf4 = buf0 del buf0 extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4 ) del arg2_1 del buf3 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class AttentionNew(torch.nn.Module): def __init__(self, dropout=0): super(AttentionNew, self).__init__() self.dropout = dropout def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
CFF-Dream/pytorch_geometric
Attention
false
2,038
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import math
import torch
import torch.nn.functional as F
import torch.utils.data


def restricted_softmax(src, dim=-1, margin=0):
    src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
    out = (src - src_max).exp()
    out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
    return out


class Model(torch.nn.Module):

    def __init__(self, dropout=0):
        super().__init__()
        self.dropout = dropout

    def forward(self, query, key, value):
        assert query.dim() == key.dim() == value.dim() >= 2
        assert query.size(-1) == key.size(-1)
        assert key.size(-2) == value.size(-2)
        score = torch.matmul(query, key.transpose(-2, -1))
        score = score / math.sqrt(key.size(-1))
        score = restricted_softmax(score, dim=-1)
        score = F.dropout(score, p=self.dropout, training=self.training)
        return torch.matmul(score, value)

    def __repr__(self):
        return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return []
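One property of restricted_softmax above is worth spelling out (an editorial note, not part of the dataset record): the shift by src_max cancels between numerator and denominator, so the function is algebraically a plain softmax computed over the input logits plus one extra implicit logit fixed at `margin`, and each row therefore sums to slightly less than 1. The compiled kernels also bake in the 1/sqrt(d_k) scale as a literal multiply by 0.5 (d_k = 4) and the min=0 clamp as triton_helpers.maximum(..., 0.0). A minimal check of the algebra:

import torch

src = torch.randn(2, 4)
margin = 0.0
# restricted_softmax, inlined from the record
src_max = torch.clamp(src.max(dim=-1, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=-1, keepdim=True) + (margin - src_max).exp())
# equivalent: softmax over [src, margin], with the extra column dropped
ref = torch.softmax(torch.cat([src, torch.full((2, 1), margin)], dim=-1), dim=-1)[:, :4]
assert torch.allclose(out, ref, atol=1e-6)
assert (out.sum(dim=-1) < 1).all()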
GlobalAttentionGeneral
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/qi/cqinh332474qtv7bgen4bcfz2yfclns66jnudr7z7wmvlrgqoduc.py # Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] # Source node to ATen node mapping: # targetT => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %permute_5 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {}) triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = 
tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask) tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + (4*x1) + (64*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/pm/cpmy57yidxxfl6wmlh5dsizlsat4uz6k43rz6t4r6h2u4z625i5l.py # Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # attn_4 => clone_1 # Graph fragment: # %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr0 + ((4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + (16*y3)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) # 
Topologically Sorted Source Nodes: [targetT], Original ATen: [aten.clone, aten.transpose] stream0 = get_raw_stream(0) triton_poi_fused_clone_transpose_0.run(primals_1, buf1, buf6, 16, 16, grid=grid(16, 16), stream=stream0) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [targetT, attn], Original ATen: [aten.clone, aten.bmm] extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_4], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 16, grid=grid(16, 16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [weightedContext], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel


def conv1x1(in_planes, out_planes, bias=False):
    """1x1 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
        padding=0, bias=bias)


class GlobalAttentionGeneral(nn.Module):

    def __init__(self, idf, cdf):
        super(GlobalAttentionGeneral, self).__init__()
        self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)
        self.mask = None

    def applyMask(self, mask):
        self.mask = mask

    def forward(self, input, context):
        """
        input: batch x idf x ih x iw (queryL=ihxiw)
        context: batch x cdf x sourceL
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context.unsqueeze(3)
        sourceT = self.conv_context(sourceT).squeeze(3)
        attn = torch.bmm(targetT, sourceT)
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            mask = self.mask.repeat(queryL, 1)
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)
        attn = attn.view(batch_size, queryL, sourceL)
        attn = torch.transpose(attn, 1, 2).contiguous()
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'idf': 4, 'cdf': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex y2 = yindex % 4 y3 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask) tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) tl.store(out_ptr1 + (y2 + 4 * x1 + 64 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr0 + (4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2 + 16 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), primals_3, stride=(1, 1), padding= (0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0 ), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 1), (16, 4, 1, 1)) buf1 = empty_strided_cuda((4, 16, 4), (64, 1, 16), torch.float32) buf6 = empty_strided_cuda((4, 4, 16), (64, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_transpose_0[grid(16, 16)](primals_1, buf1, buf6, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf2) buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0) del buf1 triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK= 16, YBLOCK=16, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 16), (64, 16, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf4, out=buf5) return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_3, reinterpret_tensor(primals_2, (4, 4, 4, 1), (16, 4, 1, 1), 0), buf2, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), buf6 def conv1x1(in_planes, out_planes, bias=False): """1x1 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=bias) class GlobalAttentionGeneralNew(nn.Module): def __init__(self, idf, cdf): super(GlobalAttentionGeneralNew, self).__init__() self.conv_context = conv1x1(cdf, idf) self.sm = nn.Softmax(dim=1) self.mask = None def applyMask(self, mask): self.mask = mask def forward(self, input_0, input_1): primals_3 = self.conv_context.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1]
BedirYilmaz/cycle-image-gan
GlobalAttentionGeneral
false
2,039
[ "MIT" ]
0
a64da5774ec522c0322e9c21437dc9c066a50a89
https://github.com/BedirYilmaz/cycle-image-gan/tree/a64da5774ec522c0322e9c21437dc9c066a50a89
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel


def conv1x1(in_planes, out_planes, bias=False):
    """1x1 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
        padding=0, bias=bias)


class Model(nn.Module):

    def __init__(self, idf, cdf):
        super().__init__()
        self.conv_context = conv1x1(cdf, idf)
        self.sm = nn.Softmax(dim=1)
        self.mask = None

    def applyMask(self, mask):
        self.mask = mask

    def forward(self, input, context):
        """
        input: batch x idf x ih x iw (queryL=ihxiw)
        context: batch x cdf x sourceL
        """
        ih, iw = input.size(2), input.size(3)
        queryL = ih * iw
        batch_size, sourceL = context.size(0), context.size(2)
        target = input.view(batch_size, -1, queryL)
        targetT = torch.transpose(target, 1, 2).contiguous()
        sourceT = context.unsqueeze(3)
        sourceT = self.conv_context(sourceT).squeeze(3)
        attn = torch.bmm(targetT, sourceT)
        attn = attn.view(batch_size * queryL, sourceL)
        if self.mask is not None:
            mask = self.mask.repeat(queryL, 1)
            attn.data.masked_fill_(mask.data, -float('inf'))
        attn = self.sm(attn)
        attn = attn.view(batch_size, queryL, sourceL)
        attn = torch.transpose(attn, 1, 2).contiguous()
        weightedContext = torch.bmm(sourceT, attn)
        weightedContext = weightedContext.view(batch_size, -1, ih, iw)
        attn = attn.view(batch_size, -1, ih, iw)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4, 4]
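As an editorial shape trace (not part of the dataset record, using only the record's get_inputs()/get_init_inputs()): with idf = cdf = 4, queryL = ih*iw = 16 and sourceL = 4, so self.sm (Softmax over dim=1) normalises each row of the flattened (batch*queryL, sourceL) score matrix. That per-group-of-4 normalisation is what triton_poi_fused__softmax_1 and triton_poi_fused_clone_2 above compute between the two bmm calls.

import torch

m = Model(4, 4)                                   # Model as defined in the pytorch_code field above
ctx, attn = m(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4))
assert ctx.shape == (4, 4, 4, 4) and attn.shape == (4, 4, 4, 4)
# attn holds (batch, sourceL, ih, iw); weights over sourceL sum to 1 per query location
assert torch.allclose(attn.sum(dim=1), torch.ones(4, 4, 4), atol=1e-5)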
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/tj/ctjenrmpmnfu2d3pk5t7qu4ih2v4kysuba4mugrps3thgibhpgig.py # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean] # Source node to ATen node mapping: # add_1 => add_1 # binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, minimum, mul_5, neg, sub_3, sub_4, sub_5 # focal_weight => mul_4 # loss => mul_6 # loss_1 => mean # loss_cls => mul_7 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # pow_1 => pow_1 # pred_sigmoid => sigmoid # pt => add # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, %sub_4), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_3 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.75), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %mul_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, 
tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = 0.25 tmp14 = tmp0 * tmp13 tmp15 = 0.75 tmp16 = tmp2 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tl.sigmoid(tmp3) tmp19 = tmp1 - tmp18 tmp20 = tmp19 * tmp0 tmp21 = tmp18 * tmp2 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp17 * tmp23 tmp25 = tmp12 * tmp24 tmp26 = tl.broadcast_to(tmp25, [RBLOCK]) tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0)) tmp29 = 256.0 tmp30 = tmp28 / tmp29 tmp31 = tmp30 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits, mul_2, sub_2, mul_3, add_1, pred_sigmoid, sub, mul, sub_1, mul_1, pt, pow_1, focal_weight, loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import Tensor
import torch.nn as nn
from typing import Optional
from typing import Union
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def sigmoid_focal_loss(pred: 'Tensor', target: 'Tensor', weight:
    'Optional[Tensor]'=None, gamma: 'float'=2.0, alpha:
    'Union[float, Tensor]'=0.25, reduction: 'str'='mean', avg_factor:
    'Optional[float]'=None) ->Tensor:
    """Sigmoid focal loss.

    Args:
        pred: The prediction with shape (N, \\*).
        target: The ground truth label of the prediction with shape (N, \\*).
        weight: Sample-wise loss weight with shape (N, ). Defaults to None.
        gamma: The gamma for calculating the modulating factor.
            Defaults to 2.0.
        alpha: A balanced form for Focal Loss. If it is a float, then a
            global balanced form is applied. If it is a Tensor, it should
            have shape (N, \\*) or any shape that is broadcast-compatible
            with `pred`.
        reduction: The method used to reduce the loss. Options are "none",
            "mean" and "sum". If reduction is 'none', the loss has the same
            shape as `pred` and `target`. Defaults to 'mean'.
        avg_factor: Average factor that is used to average the loss.
            Defaults to None.

    Returns:
        Loss.
    """
    assert pred.shape == target.shape, 'pred and target should be in the same shape.'
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(pred, target,
        reduction='none') * focal_weight
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


class FocalLoss(nn.Module):
    """Focal loss.

    Args:
        gamma (float): Focusing parameter in focal loss. Defaults to 2.0.
        alpha (float): The parameter in balanced form of focal loss.
            Defaults to 0.25.
        reduction (str): The method used to reduce the loss into a scalar.
            Options are "none" and "mean". Defaults to 'mean'.
        loss_weight (float): Weight of loss. Defaults to 1.0.
    """

    def __init__(self, gamma=2.0, alpha=0.25, reduction='mean',
        loss_weight=1.0):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
        reduction_override=None):
        """Sigmoid focal loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, \\*).
            target (torch.Tensor): The ground truth label of the prediction
                with shape (N, \\*), N or (N, 1). Note that the target must
                be one-hot encoded.
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, \\*). Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The method used to reduce
                the loss into a scalar. Options are "none", "mean" and
                "sum". Defaults to None.

        Returns:
            torch.Tensor: Loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        loss_cls = self.loss_weight * sigmoid_focal_loss(pred, target,
            weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction,
            avg_factor=avg_factor)
        return loss_cls


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
import torch.nn as nn
from typing import Optional
from typing import Union
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)  # target
    tmp3 = tl.load(in_ptr1 + r0, None)  # pred logits
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.minimum(tmp5, tmp3)
    tmp7 = tl_math.abs(tmp3)
    tmp8 = -tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = libdevice.log1p(tmp9)
    tmp11 = tmp6 - tmp10
    tmp12 = tmp4 - tmp11  # numerically stable BCE-with-logits
    tmp13 = 0.25
    tmp14 = tmp0 * tmp13
    tmp15 = 0.75
    tmp16 = tmp2 * tmp15
    tmp17 = tmp14 + tmp16  # alpha * t + (1 - alpha) * (1 - t), alpha = 0.25
    tmp18 = tl.sigmoid(tmp3)
    tmp19 = tmp1 - tmp18
    tmp20 = tmp19 * tmp0
    tmp21 = tmp18 * tmp2
    tmp22 = tmp20 + tmp21  # pt
    tmp23 = tmp22 * tmp22  # pt ** gamma with gamma = 2
    tmp24 = tmp17 * tmp23  # focal weight
    tmp25 = tmp12 * tmp24
    tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
    tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
    tmp29 = 256.0
    tmp30 = tmp28 / tmp29  # mean reduction over all 256 elements
    tmp31 = tmp30 * tmp1  # loss_weight = 1.0
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[
            grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
    return loss


def sigmoid_focal_loss(pred: 'Tensor', target: 'Tensor',
        weight: 'Optional[Tensor]' = None, gamma: 'float' = 2.0,
        alpha: 'Union[float, Tensor]' = 0.25, reduction: 'str' = 'mean',
        avg_factor: 'Optional[float]' = None) -> Tensor:
    """Sigmoid focal loss.

    Args:
        pred: The prediction with shape (N, \\*).
        target: The ground truth label of the prediction with shape (N, \\*).
        weight: Sample-wise loss weight with shape (N, ). Defaults to None.
        gamma: The gamma for calculating the modulating factor.
            Defaults to 2.0.
        alpha: A balanced form for Focal Loss. If it is a float, a global
            balanced form is applied. If it is a Tensor, it should have shape
            (N, \\*) or any shape that is broadcast-compatible with `pred`.
        reduction: The method used to reduce the loss. Options are "none",
            "mean" and "sum". If reduction is 'none', the loss has the same
            shape as pred and target. Defaults to 'mean'.
        avg_factor: Average factor that is used to average the loss.
            Defaults to None.

    Returns:
        Loss.
    """
    assert pred.shape == target.shape, 'pred and target should have the same shape.'
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(pred, target,
        reduction='none') * focal_weight
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


class FocalLossNew(nn.Module):
    """Focal loss.

    Args:
        gamma (float): Focusing parameter in focal loss. Defaults to 2.0.
        alpha (float): The parameter in the balanced form of focal loss.
            Defaults to 0.25.
        reduction (str): The method used to reduce the loss into a scalar.
            Options are "none" and "mean". Defaults to 'mean'.
        loss_weight (float): Weight of loss. Defaults to 1.0.
    """

    def __init__(self, gamma=2.0, alpha=0.25, reduction='mean',
            loss_weight=1.0):
        super(FocalLossNew, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
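For reference, a minimal equivalence sketch between the eager path and the fused kernel (it assumes a CUDA device; the (4, 4, 4, 4) shape matters because the kernel hard-codes a 256-element reduction, and the defaults gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0 are baked into its constants):

if torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    eager = sigmoid_focal_loss(pred, target)  # defaults: gamma=2.0, alpha=0.25, 'mean'
    fused = FocalLossNew()(pred, target)      # one fused Triton reduction kernel
    assert torch.allclose(eager, fused, atol=1e-05)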
CAMP-eXplain-AI/imba-explain
FocalLoss
false
2,040
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
import torch
from torch import Tensor
import torch.nn as nn
from typing import Optional
from typing import Union
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor cannot be used with reduction="sum"')
    return loss


def sigmoid_focal_loss(pred: 'Tensor', target: 'Tensor',
        weight: 'Optional[Tensor]' = None, gamma: 'float' = 2.0,
        alpha: 'Union[float, Tensor]' = 0.25, reduction: 'str' = 'mean',
        avg_factor: 'Optional[float]' = None) -> Tensor:
    """Sigmoid focal loss.

    Args:
        pred: The prediction with shape (N, \\*).
        target: The ground truth label of the prediction with shape (N, \\*).
        weight: Sample-wise loss weight with shape (N, ). Defaults to None.
        gamma: The gamma for calculating the modulating factor.
            Defaults to 2.0.
        alpha: A balanced form for Focal Loss. If it is a float, a global
            balanced form is applied. If it is a Tensor, it should have shape
            (N, \\*) or any shape that is broadcast-compatible with `pred`.
        reduction: The method used to reduce the loss. Options are "none",
            "mean" and "sum". If reduction is 'none', the loss has the same
            shape as pred and target. Defaults to 'mean'.
        avg_factor: Average factor that is used to average the loss.
            Defaults to None.

    Returns:
        Loss.
    """
    assert pred.shape == target.shape, 'pred and target should have the same shape.'
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(pred, target,
        reduction='none') * focal_weight
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


class Model(nn.Module):
    """Focal loss.

    Args:
        gamma (float): Focusing parameter in focal loss. Defaults to 2.0.
        alpha (float): The parameter in the balanced form of focal loss.
            Defaults to 0.25.
        reduction (str): The method used to reduce the loss into a scalar.
            Options are "none" and "mean". Defaults to 'mean'.
        loss_weight (float): Weight of loss. Defaults to 1.0.
    """

    def __init__(self, gamma=2.0, alpha=0.25, reduction='mean',
            loss_weight=1.0):
        super().__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
            reduction_override=None):
        """Sigmoid focal loss.

        Args:
            p
# ... truncated (>4000 chars) for memory efficiency
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 
4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, 
repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.utils.data class Discriminator(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super(Discriminator, self).__init__() self.lin1 = torch.nn.Linear(in_channels, hidden_channels) self.lin2 = torch.nn.Linear(hidden_channels, hidden_channels) self.lin3 = torch.nn.Linear(hidden_channels, out_channels) def forward(self, x): x = F.relu(self.lin1(x)) x = F.relu(self.lin2(x)) x = self.lin3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'hidden_channels': 4, 'out_channels': 4}]
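One detail worth noting about the record above: nn.Linear operates on the last dimension only, so the (4, 4, 4, 4) test input is processed as 64 independent 4-dimensional feature vectors and the output keeps the input's leading shape. A small usage sketch with the hyper-parameters from get_init_inputs:

import torch

model = Discriminator(in_channels=4, hidden_channels=4, out_channels=4)
out = model(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)  # only the last dim is mapped through the MLP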
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6 class DiscriminatorNew(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super(DiscriminatorNew, self).__init__() self.lin1 = torch.nn.Linear(in_channels, hidden_channels) self.lin2 = torch.nn.Linear(hidden_channels, hidden_channels) self.lin3 = torch.nn.Linear(hidden_channels, out_channels) def forward(self, input_0): primals_1 = self.lin1.weight primals_2 = self.lin1.bias primals_4 = self.lin2.weight primals_5 = self.lin2.bias primals_6 = self.lin3.weight primals_7 = self.lin3.bias primals_3 = input_0 output 
= call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
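The single Triton kernel in this record fuses the bias add, the ReLU, and the bookkeeping for the backward pass: besides overwriting its input with relu(x + bias), it stores the boolean mask relu(x + bias) <= 0 that aten.threshold_backward later uses to zero the gradient. An eager sketch of the same computation (the helper name is hypothetical, not part of the generated code):

import torch

def fused_bias_relu_with_mask(x, bias):
    y = torch.relu(x + bias)  # written back in place of buf1 / buf3 above
    mask = y <= 0             # saved as buf6 / buf5 for the ReLU backward pass
    return y, mask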
CFF-Dream/pytorch_geometric
Discriminator
false
2,041
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import torch import torch.nn.functional as F import torch.utils.data class Model(torch.nn.Module): def __init__(self, in_channels, hidden_channels, out_channels): super().__init__() self.lin1 = torch.nn.Linear(in_channels, hidden_channels) self.lin2 = torch.nn.Linear(hidden_channels, hidden_channels) self.lin3 = torch.nn.Linear(hidden_channels, out_channels) def forward(self, x): x = F.relu(self.lin1(x)) x = F.relu(self.lin2(x)) x = self.lin3(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
MNIST_CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/xd/cxdqslrdqajmcxikxhvxi7lkzd2yepfzcwkkltrpstapeq35h632.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/j5/cj5nf2owtsdm2zwcezqxpyn63iwddjyadpotkhm2ua52inoqxdcl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ra/crarmf7s2qf36jg27hprl42qtwcxcnnoyrgzgevtstzj4qgsdzwl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/dd/cddy2xs2uderg6rhu3vap3su355lmjpkrmadmh5gnbcfg2frfd5z.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/e4/ce4nidkyu2gtcdpalogjljsg5wvmcfnzpr4d7mxmzgqhcik7e3zy.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = 
call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/uo/cuoq67x7pplkn56jmv4egzgakdmdolviuhclk6uuvy2isp3yvvam.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused_native_group_norm_5 = async_compile.triton('triton_per_fused_native_group_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex % 8 r3 = (rindex // 8) x0 = xindex % 8 x1 = (xindex // 8) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (64*r3) + (1024*x1)), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 128, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 128.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr2 + (x4), tmp23, xmask) tl.store(out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr1 + (x4), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/t3/ct3qwupet5zpwc3rrfmucbarfddm4ezt2y7zf5ute5oce57arckm.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => add_1, mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) triton_poi_fused_native_group_norm_6 = async_compile.triton('triton_poi_fused_native_group_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = (xindex // 1024) tmp0 = tl.load(in_ptr0 + (x3), None) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 128.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/bo/cboj3lo2ix24qbhixyncx6gvd7fcgojynowxk4mu7hyobzavs4tx.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/7q/c7q6tque53rs7lodp6ydfpgpqa75zh7pikmsrf2pnpy6czejrkpz.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => add_2, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_per_fused_native_group_norm_8 = async_compile.triton('triton_per_fused_native_group_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = (rindex // 16) x0 = xindex % 8 x1 = (xindex // 8) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (128*r3) + (512*x1)), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 
tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr2 + (x4), tmp23, xmask) tl.store(out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr1 + (x4), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/uu/cuu4nxa6fxukpo23zd5i5bbo3jmi4tcdz5jxk75g5k63t4k6dxob.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => add_3, mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {}) triton_poi_fused_native_group_norm_9 = async_compile.triton('triton_poi_fused_native_group_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = (xindex // 512) tmp0 = tl.load(in_ptr0 + (x3), None) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 64.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/xr/cxrvhmzov25motoxdbptp5xb5kgxlq4dxjyz2c5uqkf2isadwdos.py # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] # Source node to ATen node mapping: # x_11 => add_7, mul_7 # x_12 => mean # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_7, [-1, -2], True), kwargs = {}) triton_poi_fused_mean_native_group_norm_10 = async_compile.triton('triton_poi_fused_mean_native_group_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_native_group_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_native_group_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1)), xmask) tmp3 = tl.load(in_ptr1 + ((x2 // 16)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((x2 // 16)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (128 + x0 + (512*x1)), xmask) tmp23 = tl.load(in_ptr0 + (256 + x0 + (512*x1)), xmask) tmp30 = tl.load(in_ptr0 + (384 + x0 + (512*x1)), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 64.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = tmp17 - tmp3 tmp19 = tmp18 * tmp10 tmp20 = tmp19 * tmp12 tmp21 = tmp20 + tmp14 tmp22 = tmp15 + tmp21 tmp24 = triton_helpers.maximum(tmp1, tmp23) tmp25 = tmp24 - tmp3 tmp26 = tmp25 * tmp10 tmp27 = tmp26 * tmp12 tmp28 
= tmp27 + tmp14 tmp29 = tmp22 + tmp28 tmp31 = triton_helpers.maximum(tmp1, tmp30) tmp32 = tmp31 - tmp3 tmp33 = tmp32 * tmp10 tmp34 = tmp33 * tmp12 tmp35 = tmp34 + tmp14 tmp36 = tmp29 + tmp35 tmp37 = 4.0 tmp38 = tmp36 / tmp37 tl.store(out_ptr0 + (x2), tmp38, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, ), (1, )) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, ), (1, )) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, ), (1, )) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (128, ), (1, )) assert_size_stride(primals_17, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4, 3, 3), (36, 1, 12, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 256, 9, grid=grid(256, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_6, buf2, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_10, buf3, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_14, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_14 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 4, 4), (1024, 1, 256, 64)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf6, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 buf7 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf8 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf11 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_5.run(buf6, buf7, buf8, buf11, 32, 128, grid=grid(32), stream=stream0) buf10 = 
empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_6.run(buf6, buf7, buf8, primals_4, primals_5, buf10, 4096, grid=grid(4096), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 2, 2), (512, 1, 256, 128)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf13, primals_7, 2048, grid=grid(2048), stream=stream0) del primals_7 buf14 = buf8; del buf8 # reuse buf15 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf18 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_8.run(buf13, buf14, buf15, buf18, 32, 64, grid=grid(32), stream=stream0) buf17 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_9.run(buf13, buf14, buf15, primals_8, primals_9, buf17, 2048, grid=grid(2048), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf17, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 2, 2), (512, 1, 256, 128)) buf20 = buf19; del buf19 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf20, primals_11, 2048, grid=grid(2048), stream=stream0) del primals_11 buf21 = buf15; del buf15 # reuse buf22 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_8.run(buf20, buf21, buf22, buf25, 32, 64, grid=grid(32), stream=stream0) buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_9.run(buf20, buf21, buf22, primals_12, primals_13, buf24, 2048, grid=grid(2048), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf24, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 2, 2), (512, 1, 256, 128)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf27, primals_15, 2048, grid=grid(2048), stream=stream0) del primals_15 buf28 = buf22; del buf22 # reuse buf29 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf31 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_8.run(buf27, buf28, buf29, buf31, 32, 64, grid=grid(32), 
stream=stream0) buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] triton_poi_fused_mean_native_group_norm_10.run(buf27, buf28, buf29, primals_16, primals_17, buf32, 512, grid=grid(512), stream=stream0) del buf29 del primals_17 return (reinterpret_tensor(buf32, (4, 128), (128, 1), 0), buf0, buf1, primals_4, buf2, primals_8, buf3, primals_12, buf4, primals_16, buf6, buf10, reinterpret_tensor(buf7, (4, 8), (8, 1), 0), reinterpret_tensor(buf11, (4, 8), (8, 1), 0), buf13, buf17, reinterpret_tensor(buf14, (4, 8), (8, 1), 0), reinterpret_tensor(buf18, (4, 8), (8, 1), 0), buf20, buf24, reinterpret_tensor(buf21, (4, 8), (8, 1), 0), reinterpret_tensor(buf25, (4, 8), (8, 1), 0), buf27, reinterpret_tensor(buf28, (4, 8), (8, 1), 0), reinterpret_tensor(buf31, (4, 8), (8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
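A minimal eager-mode sketch of what the fused GroupNorm reduction launched above (triton_per_fused_native_group_norm_5) computes for each (batch, group) pair: the mean, the sum of squared deviations, and rsqrt(var + eps) of the ReLU'd conv1 output. The shapes and constants are taken from this entry; the snippet itself is an illustration, not part of the generated file.

import torch

x = torch.randn(4, 64, 4, 4)                      # conv1 output shape in this entry
y = torch.relu(x)                                 # the ReLU is fused into the reduction (tmp2)
g = y.reshape(4, 8, 128)                          # 8 groups: 8 channels * 16 spatial = 128 elements
mean = g.mean(dim=-1)                             # tmp12, written to out_ptr0
sumsq = ((g - mean[..., None]) ** 2).sum(dim=-1)  # tmp18, written to out_ptr1
rstd = torch.rsqrt(sumsq / 128.0 + 1e-05)         # tmp23, written to out_ptr2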
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class SqueezeLastTwo(nn.Module): """A module which squeezes the last two dimensions; an ordinary squeeze can be a problem for batch size 1""" def __init__(self): super(SqueezeLastTwo, self).__init__() def forward(self, x): return x.view(x.shape[0], x.shape[1]) class MNIST_CNN(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 generalization severely. """ n_outputs = 128 def __init__(self, input_shape): super(MNIST_CNN, self).__init__() self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.squeezeLastTwo = SqueezeLastTwo() def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.bn0(x) x = self.conv2(x) x = F.relu(x) x = self.bn1(x) x = self.conv3(x) x = F.relu(x) x = self.bn2(x) x = self.conv4(x) x = F.relu(x) x = self.bn3(x) x = self.avgpool(x) x = self.squeezeLastTwo(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 256 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex % 8 r3 = rindex // 8 x0 = xindex % 8 x1 = xindex // 8 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 64 * r3 + 1024 * x1), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 128, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 128.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr2 + x4, tmp23, xmask) tl.store(out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr1 + x4, tmp18, xmask) @triton.jit def triton_poi_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = xindex // 1024 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 128.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, 
xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 8 x1 = xindex // 8 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 128 * r3 + 512 * x1), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr2 + x4, tmp23, xmask) tl.store(out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr1 + x4, tmp18, xmask) @triton.jit def triton_poi_fused_native_group_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = xindex // 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 64.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_mean_native_group_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1), xmask) tmp3 = tl.load(in_ptr1 + x2 // 16, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x2 // 16, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (128 + x0 + 512 * x1), xmask) tmp23 = tl.load(in_ptr0 + (256 + x0 + 512 * x1), xmask) tmp30 = tl.load(in_ptr0 + (384 + x0 + 512 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 64.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = tmp17 - tmp3 tmp19 = tmp18 * tmp10 tmp20 = tmp19 * tmp12 tmp21 = tmp20 + tmp14 tmp22 = tmp15 + tmp21 tmp24 = triton_helpers.maximum(tmp1, tmp23) tmp25 = tmp24 - tmp3 tmp26 = tmp25 * tmp10 tmp27 = tmp26 * tmp12 tmp28 = tmp27 + tmp14 tmp29 = tmp22 + tmp28 tmp31 = triton_helpers.maximum(tmp1, tmp30) tmp32 = tmp31 - tmp3 tmp33 = tmp32 * tmp10 tmp34 = tmp33 * tmp12 
tmp35 = tmp34 + tmp14 tmp36 = tmp29 + tmp35 tmp37 = 4.0 tmp38 = tmp36 / tmp37 tl.store(out_ptr0 + x2, tmp38, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64,), (1,)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4, 3, 3), (36, 1, 12, 4), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(256, 9)](primals_1, buf0, 256, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_6, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_10, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_14, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 4, 4), (1024, 1, 256, 64)) buf6 = buf5 del buf5 triton_poi_fused_convolution_4[grid(4096)](buf6, primals_2, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf7 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf8 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf11 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_5[grid(32)](buf6, buf7, buf8, buf11, 32, 128, XBLOCK=8, num_warps=8, num_stages=1) buf10 = empty_strided_cuda((4, 64, 4, 4), (1024, 1, 256, 64), torch .float32) triton_poi_fused_native_group_norm_6[grid(4096)](buf6, buf7, buf8, primals_4, primals_5, buf10, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf12 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 2, 2), (512, 1, 256, 128)) buf13 = buf12 del buf12 triton_poi_fused_convolution_7[grid(2048)](buf13, primals_7, 2048, XBLOCK=256, num_warps=4, num_stages=1) del 
primals_7 buf14 = buf8 del buf8 buf15 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf18 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_8[grid(32)](buf13, buf14, buf15, buf18, 32, 64, XBLOCK=32, num_warps=8, num_stages=1) buf17 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_native_group_norm_9[grid(2048)](buf13, buf14, buf15, primals_8, primals_9, buf17, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf19 = extern_kernels.convolution(buf17, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 128, 2, 2), (512, 1, 256, 128)) buf20 = buf19 del buf19 triton_poi_fused_convolution_7[grid(2048)](buf20, primals_11, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf21 = buf15 del buf15 buf22 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_8[grid(32)](buf20, buf21, buf22, buf25, 32, 64, XBLOCK=32, num_warps=8, num_stages=1) buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 1, 256, 128), torch.float32) triton_poi_fused_native_group_norm_9[grid(2048)](buf20, buf21, buf22, primals_12, primals_13, buf24, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf26 = extern_kernels.convolution(buf24, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 2, 2), (512, 1, 256, 128)) buf27 = buf26 del buf26 triton_poi_fused_convolution_7[grid(2048)](buf27, primals_15, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf28 = buf22 del buf22 buf29 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf31 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_8[grid(32)](buf27, buf28, buf29, buf31, 32, 64, XBLOCK=32, num_warps=8, num_stages=1) buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.float32) triton_poi_fused_mean_native_group_norm_10[grid(512)](buf27, buf28, buf29, primals_16, primals_17, buf32, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf29 del primals_17 return (reinterpret_tensor(buf32, (4, 128), (128, 1), 0), buf0, buf1, primals_4, buf2, primals_8, buf3, primals_12, buf4, primals_16, buf6, buf10, reinterpret_tensor(buf7, (4, 8), (8, 1), 0), reinterpret_tensor(buf11, (4, 8), (8, 1), 0), buf13, buf17, reinterpret_tensor(buf14, (4, 8), (8, 1), 0), reinterpret_tensor(buf18, (4, 8), (8, 1), 0), buf20, buf24, reinterpret_tensor(buf21, (4, 8), (8, 1), 0), reinterpret_tensor(buf25, (4, 8), (8, 1), 0), buf27, reinterpret_tensor(buf28, (4, 8), (8, 1), 0), reinterpret_tensor(buf31, (4, 8), (8, 1), 0)) class SqueezeLastTwo(nn.Module): """A module which squeezes the last two dimensions; an ordinary squeeze can be a problem for batch size 1""" def __init__(self): super(SqueezeLastTwo, self).__init__() def forward(self, x): return x.view(x.shape[0], x.shape[1]) class MNIST_CNNNew(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 generalization severely.
""" n_outputs = 128 def __init__(self, input_shape): super(MNIST_CNNNew, self).__init__() self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.squeezeLastTwo = SqueezeLastTwo() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_10 = self.conv3.weight primals_8 = self.conv3.bias primals_14 = self.conv4.weight primals_9 = self.conv4.bias primals_4 = self.bn0.weight primals_5 = self.bn0.bias primals_11 = self.bn1.weight primals_12 = self.bn1.bias primals_13 = self.bn2.weight primals_15 = self.bn2.bias primals_16 = self.bn3.weight primals_17 = self.bn3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
AllenPu/DomainBed
MNIST_CNN
false
2042
[ "MIT" ]
0
77519d71471e67f0356134abe0bf01a6dd2fdcfa
https://github.com/AllenPu/DomainBed/tree/77519d71471e67f0356134abe0bf01a6dd2fdcfa
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class SqueezeLastTwo(nn.Module): """A module which squeezes the last two dimensions; an ordinary squeeze can be a problem for batch size 1""" def __init__(self): super().__init__() def forward(self, x): return x.view(x.shape[0], x.shape[1]) class Model(nn.Module): """ Hand-tuned architecture for MNIST. Weirdness I've noticed so far with this architecture: - adding a linear layer after the mean-pool in features hurts RotatedMNIST-100 generalization severely. """ n_outputs = 128 def __init__(self, input_shape): super().__init__() self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.squeezeLastTwo = SqueezeLastTwo() def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.bn0(x) x = self.conv2(x) x = F.relu(x) x = self.bn1(x) x = self.conv3(x) x = F.relu(x) x = self.bn2(x) x = self.conv4(x) x = F.relu(x) x = self.bn3(x) x = self.avgpool(x) x = self.squeezeLastTwo(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
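The SqueezeLastTwo docstring above is terse, so here is a small illustration (mine, not from the repo) of the failure mode it guards against: with a batch of one, an unqualified squeeze() drops the batch dimension along with the spatial ones, while the view keeps it.

import torch

feat = torch.ones(1, 128, 1, 1)                        # avgpool output for batch size 1
print(feat.squeeze().shape)                            # torch.Size([128]): batch dim lost
print(feat.view(feat.shape[0], feat.shape[1]).shape)   # torch.Size([1, 128]): batch dim kept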
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/fg/cfg742icmosiwp5ugziye26din5ueqx3v7ntptkkpyackudldrxs.py # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] # Source node to ATen node mapping: # eq => eq # Graph fragment: # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_10, 0), kwargs = {}) triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/dq/cdq47qo4ts2g3xlphk7jfetedy4flk56wockrd2oftlqur23w5s5.py # Topologically Sorted Source 
Nodes: [energy_1, energy_2], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # energy_1 => full_default, where # energy_2 => exp, sum_1 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %view_9), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_masked_fill_1 = async_compile.triton('triton_poi_fused__softmax_masked_fill_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + ((4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y0 + (16*y1)), ymask, 
eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr2 + (4 + y0 + (16*y1)), ymask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (2 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp15 = tl.load(in_ptr2 + (8 + y0 + (16*y1)), ymask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr2 + (12 + y0 + (16*y1)), ymask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp10 = tmp1 * tmp9 tmp11 = tl.where(tmp8, tmp4, tmp10) tmp12 = tmp11 * tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tl.where(tmp14, tmp4, tmp16) tmp18 = tmp17 * tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tl.where(tmp20, tmp4, tmp22) tmp24 = tmp23 * tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = 2.0 tmp28 = tmp26 * tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = tmp12 - tmp25 tmp31 = tmp30 * tmp27 tmp32 = tl_math.exp(tmp31) tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp25 tmp35 = tmp34 * tmp27 tmp36 = tl_math.exp(tmp35) tmp37 = tmp33 + tmp36 tmp38 = tmp24 - tmp25 tmp39 = tmp38 * tmp27 tmp40 = tl_math.exp(tmp39) tmp41 = tmp37 + tmp40 tl.store(out_ptr0 + (x2 + (4*y3)), tmp25, xmask & ymask) tl.store(out_ptr1 + (x2 + (4*y3)), tmp41, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/zx/czxkw2dfowl52i5ol7rtewbkdja272fzxfs3ljnrg5rjb2jehxdz.py # Topologically Sorted Source Nodes: [energy_1, energy_2], Original ATen: [aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # energy_1 => full_default, where # energy_2 => div_1, exp # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0000000200408773e+20), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %view_9), kwargs = {}) # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 0.5), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x0 = xindex % 4 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x4), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x2 + (4*x1) + (16*x3)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp9 = tmp7 - tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/gt/cgtqpaqfwouqxpwri7ygertlpra3qn2kcexi7wu53bb7eglybfrs.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] # Source node to ATen node mapping: # out => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK]) tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + (x2 + (4*y3)), tmp3, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/cv/ccva5wkkqbsuazdvak5m7p7hmc25v5vunf5dixpzgwz7tfno5e3i.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # out_2 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_15, %view_14], -1), kwargs = {}) triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = (xindex // 8) x1 = (xindex // 8) % 4 x2 = (xindex // 32) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x1 + (4*((-4) + x0)) + (16*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (1, 1), (1, 1)) assert_size_stride(primals_9, (1, ), (1, )) assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_11, (4, 8), (8, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [query_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), primals_4, alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [key_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), primals_6, alpha=1, beta=1, out=buf3) del primals_6 del primals_7 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), primals_8, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq] stream0 = get_raw_stream(0) triton_poi_fused_eq_0.run(primals_10, buf5, 256, grid=grid(256), stream=stream0) del primals_10 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [energy_1, energy_2], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_1.run(buf5, buf1, buf3, buf6, buf7, 16, 4, grid=grid(16, 4), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_1, energy_2], Original ATen: [aten.masked_fill, aten._softmax] triton_poi_fused__softmax_masked_fill_2.run(buf5, buf1, buf3, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0) del buf6 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, primals_9, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del primals_9 buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat] triton_poi_fused_cat_4.run(buf1, buf10, buf11, 128, grid=grid(128), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, 
reinterpret_tensor(buf11, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf12) del primals_12 return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), buf8, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), buf1, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), buf3, reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), buf5, buf8, reinterpret_tensor(buf11, (16, 8), (8, 1), 0), primals_11, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
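For reference, a minimal eager-mode reconstruction of the masked softmax that triton_poi_fused__softmax_masked_fill_1 and _2 above split into a row-statistics pass and a normalization pass. The shapes and constants match this entry; the snippet is a sketch of the fused math, not generated output.

import torch

energy = torch.randn(4, 4, 4, 4)                   # bhqk attention scores
mask = torch.rand(4, 4, 4, 4)
e = energy.masked_fill(mask == 0, float('-1e20'))  # float('-1e20') in float32 is the
                                                   # -1.0000000200408773e+20 seen in the kernels
attn = torch.softmax(e / 0.5, dim=-1)              # self.depth ** 1 / 2 == 0.5 here, since depth == 1
# The kernels compute the same thing stably: subtract the row max (tmp25),
# scale the difference by 2.0 (tmp27), exponentiate, then divide by the row sum (tmp41).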
import torch import torch.nn as nn class SelfAttention(nn.Module): def __init__(self, embed_dims, heads): super(SelfAttention, self).__init__() self.heads = heads self.embed_dims = embed_dims self.depth = embed_dims // heads self.query = nn.Linear(self.depth, self.depth) self.key = nn.Linear(self.depth, self.depth) self.value = nn.Linear(self.depth, self.depth) self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims) def forward(self, query, key, value, mask, isDecoder=False): batch, q_len, k_len, v_len = query.shape[0], query.shape[1], key.shape[1], value.shape[1] query = query.reshape(batch, q_len, self.heads, self.depth) key = key.reshape(batch, k_len, self.heads, self.depth) value = value.reshape(batch, v_len, self.heads, self.depth) query = self.query(query) key = self.key(key) value = self.value(value) energy = torch.einsum('bqhd, bkhd -> bhqk', [query, key]) if mask is not None: energy = energy.masked_fill(mask == 0, float('-1e20')) energy = torch.softmax(energy / (self.depth ** 1 / 2), dim=-1) out = torch.einsum('bhqv, bvhd -> bqhd', [energy, value]) out = out.reshape(batch, q_len, self.heads * self.depth) query = query.reshape(batch, q_len, self.heads * self.depth) out = torch.cat([query, out], dim=-1) out = self.fc_out(out) return out, energy def get_inputs(): return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'embed_dims': 4, 'heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_masked_fill_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr2 + (4 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp15 = tl.load(in_ptr2 + (8 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last').to(tl.int1) tmp21 = tl.load(in_ptr2 + (12 + y0 + 16 * y1), ymask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp10 = tmp1 * tmp9 tmp11 = tl.where(tmp8, tmp4, tmp10) tmp12 = tmp11 * tmp6 tmp13 = triton_helpers.maximum(tmp7, tmp12) tmp16 = tmp1 * tmp15 tmp17 = tl.where(tmp14, tmp4, tmp16) tmp18 = tmp17 * tmp6 tmp19 = triton_helpers.maximum(tmp13, tmp18) tmp22 = tmp1 * tmp21 tmp23 = tl.where(tmp20, tmp4, tmp22) tmp24 = tmp23 * tmp6 tmp25 = triton_helpers.maximum(tmp19, tmp24) tmp26 = tmp7 - tmp25 tmp27 = 2.0 tmp28 = tmp26 * tmp27 tmp29 = tl_math.exp(tmp28) tmp30 = tmp12 - tmp25 tmp31 = tmp30 * tmp27 tmp32 = tl_math.exp(tmp31) tmp33 = tmp29 + tmp32 tmp34 = tmp18 - tmp25 tmp35 = tmp34 * tmp27 tmp36 = tl_math.exp(tmp35) tmp37 = tmp33 + tmp36 tmp38 = tmp24 - tmp25 tmp39 = tmp38 * tmp27 tmp40 = tl_math.exp(tmp39) tmp41 = tmp37 + tmp40 tl.store(out_ptr0 + (x2 + 4 * y3), tmp25, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp41, xmask & ymask) @triton.jit def triton_poi_fused__softmax_masked_fill_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x0 = xindex % 4 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + 
x4, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x2 + 4 * x1 + 16 * x3), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x2 + 4 * x0 + 16 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = -1.0000000200408773e+20 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp9 = tmp7 - tmp8 tmp10 = 2.0 tmp11 = tmp9 * tmp10 tmp12 = tl_math.exp(tmp11) tmp14 = tmp12 / tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, YBLOCK]) tmp3 = tmp0 + tmp2 tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = xindex // 8 x1 = xindex // 8 % 4 x2 = xindex // 32 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 4 * (-4 + x0) + 16 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (1, 1), (1, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 1), (1, 1)) assert_size_stride(primals_9, (1,), (1,)) assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_11, (4, 8), (8, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0), primals_4, alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0), primals_6, alpha=1, beta=1, out=buf3) del primals_6 del primals_7 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 1), (1, 1), 0), primals_8, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(256)](primals_10, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_10 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_masked_fill_1[grid(16, 4)](buf5, buf1, buf3, buf6, buf7, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_masked_fill_2[grid(256)](buf5, buf1, buf3, buf6, buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0) del buf7 triton_poi_fused_clone_3[grid(16, 4)](buf4, primals_9, buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf10 = reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_4[grid(128)](buf1, buf10, buf11, 128, XBLOCK= 128, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_12, reinterpret_tensor(buf11, (16, 8), (8, 1), 0), reinterpret_tensor(primals_11, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf12) del primals_12 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), buf8, reinterpret_tensor(primals_1, (64, 1), (1, 1), 0 ), buf1, reinterpret_tensor(primals_2, (64, 1), (1, 1), 0 ), buf3, reinterpret_tensor(primals_3, (64, 1), (1, 1), 0 ), buf5, buf8, reinterpret_tensor(buf11, (16, 8), (8, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, embed_dims, heads): super(SelfAttentionNew, self).__init__() self.heads = heads self.embed_dims = embed_dims self.depth = embed_dims // heads self.query = nn.Linear(self.depth, self.depth) self.key = nn.Linear(self.depth, self.depth) self.value = nn.Linear(self.depth, self.depth) self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims) def forward(self, input_0, input_1, input_2, input_3): primals_4 = self.query.weight primals_5 = self.query.bias primals_6 = self.key.weight primals_7 = self.key.bias primals_8 = self.value.weight primals_9 = self.value.bias primals_11 = self.fc_out.weight primals_12 = self.fc_out.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
Brandon-mg/LipReader-Transformer
SelfAttention
false
2,043
[ "MIT" ]
0
0fe52957943368d7c5b8d1b0df39e3fb14f7c035
https://github.com/Brandon-mg/LipReader-Transformer/tree/0fe52957943368d7c5b8d1b0df39e3fb14f7c035
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, embed_dims, heads):
        super().__init__()
        self.heads = heads
        self.embed_dims = embed_dims
        self.depth = embed_dims // heads
        self.query = nn.Linear(self.depth, self.depth)
        self.key = nn.Linear(self.depth, self.depth)
        self.value = nn.Linear(self.depth, self.depth)
        self.fc_out = nn.Linear(self.depth * self.heads * 2, self.embed_dims)

    def forward(self, query, key, value, mask, isDecoder=False):
        # isDecoder originally guarded debug statements that were stripped to
        # bare `None` expressions; those dead branches are removed here, but
        # the flag is kept for signature compatibility.
        batch, q_len, k_len, v_len = query.shape[0], query.shape[1], key.shape[
            1], value.shape[1]
        query = query.reshape(batch, q_len, self.heads, self.depth)
        key = key.reshape(batch, k_len, self.heads, self.depth)
        value = value.reshape(batch, v_len, self.heads, self.depth)
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)
        energy = torch.einsum('bqhd, bkhd -> bhqk', [query, key])
        if mask is not None:
            energy = energy.masked_fill(mask == 0, float('-1e20'))
        # Note: `self.depth ** 1 / 2` parses as `(self.depth ** 1) / 2`, i.e.
        # depth / 2, not sqrt(depth). The compiled Triton kernels reproduce
        # this exact scaling, so the expression is kept unchanged.
        energy = torch.softmax(energy / (self.depth ** 1 / 2), dim=-1)
        out = torch.einsum('bhqv, bvhd -> bqhd', [energy, value])
        out = out.reshape(batch, q_len, self.heads * self.depth)
        query = query.reshape(batch, q_len, self.heads * self.depth)
        out = torch.cat([query, out], dim=-1)
        out = self.fc_out(out)
        return out, energy


def get_inputs():
    return [torch.rand([4, 4, 4, 1]), torch.rand([4, 4, 4, 1]), torch.rand(
        [4, 4, 4, 1]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
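The scaling subtlety flagged in the comment above is easy to verify in isolation (a standalone illustration, not part of the record):

# With depth = 1 (embed_dims=4, heads=4), the written expression divides the
# scores by 0.5 -- equivalently multiplies them by 2 -- which is exactly the
# `tmp10 = 2.0; tmp11 = tmp9 * tmp10` constant visible in the fused
# masked_fill/softmax kernel earlier in this record.
depth = 1
assert depth ** 1 / 2 == 0.5      # what the code computes: (depth ** 1) / 2
assert depth ** (1 / 2) == 1.0    # the usual sqrt(depth) attention scale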
SmoothL1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/c5/cc5asg4nd3jaxgpbn6ptpyi6axgsaygnv5pqlnptuo35p2hhtygb.py # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.smooth_l1_loss, aten.mul] # Source node to ATen node mapping: # loss => abs_1, div, lt, mean, mul, pow_1, sub, sub_1, where # mul => mul_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mul_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_mul_smooth_l1_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 
1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.smooth_l1_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mul_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
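For readability, a plain-PyTorch restatement of the fused smooth-L1 graph above; the tensor names here are local to this sketch, not part of the generated code:

import torch
import torch.nn.functional as F

# Reproduce the kernel's math: d = |arg1 - arg0|, the piecewise smooth-L1
# branch select, a mean over all 256 elements, then the loss_weight multiply
# (1.0 here, the `tmp17 = tmp16 * tmp4` step).
output = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
d = (target - output).abs()
loss = torch.where(d < 1.0, 0.5 * d * d / 1.0, d - 0.5).mean() * 1.0
assert torch.allclose(loss, F.smooth_l1_loss(output, target))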
import torch import torch.nn as nn import torch.nn.functional as F class SmoothL1Loss(nn.Module): """SmoothL1Loss loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.smooth_l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
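A short usage sketch for the weighted path of the module above; the shapes are illustrative values following the N/K/D convention from the docstring:

import torch

crit = SmoothL1Loss(use_target_weight=True, loss_weight=0.5)
output = torch.rand(4, 17, 2)   # e.g. N=4 samples, K=17 keypoints, D=2
target = torch.rand(4, 17, 2)
weight = torch.ones(4, 17, 2)   # per-joint weights; all ones == unweighted
loss = crit(output, target, target_weight=weight)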
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tmp17 = tmp16 * tmp4 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mul_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SmoothL1LossNew(nn.Module): """SmoothL1Loss loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.smooth_l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
SmoothL1Loss
false
2,044
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """SmoothL1Loss loss. Args: use_target_weight (bool): Option to use weighted MSE loss. Different joint types may have different target weights. loss_weight (float): Weight of the loss. Default: 1.0. """ def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.smooth_l1_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K - dimension of keypoints: D (D=2 or D=3) Args: output (torch.Tensor[N, K, D]): Output regression. target (torch.Tensor[N, K, D]): Target regression. target_weight (torch.Tensor[N, K, D]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
MultiHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] # Source node to ATen node mapping: # out_1 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_5), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/pr/cpr46kebggqhplwxfjepx3opy5gwceg23zjukixehtkuuugc5juo.py # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] # Source node to ATen node mapping: # score => bmm # Graph fragment: # %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%view_22, %view_25), kwargs = {}) # %permute_16 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_22, [0, 2, 1]), kwargs = {}) triton_poi_fused_bmm_transpose_1 = async_compile.triton('triton_poi_fused_bmm_transpose_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2) + (64*((x1 + (4*(x2 % 4))) // 16))), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) tl.store(out_ptr1 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/s3/cs35hxhkkmqsjvinhszu6rfwn3hooazjypcbwiv3xr2ywjn3ewei.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out_6], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] # Source node to ATen node mapping: # max_1 => max_1 # out_6 => exp # score_1 => div # src_max => clamp_min # sub => sub # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused_clamp_div_exp_max_sub_2 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_2', ''' import triton 
import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/rq/crqro5go55q5jp5dqpl4be2mqfi5nsh7elfb6swmltvboawz2isq.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add_3 # exp_1 => exp_1 # max_1 => max_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add_3 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/tc/ctcdm2hcp45nesppfpnqkkcad3pvaqhoo4vgpxv6xzpnd4nc6svo.py # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_7], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] # Source node to ATen node mapping: # add => add_3 # exp_1 => exp_1 # max_1 => max_1 # out_7 => div_1 # score_1 => div # src_max => clamp_min # sub_1 => sub_1 # sum_1 => sum_1 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {}) 
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add_3), kwargs = {}) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2) del primals_8 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add] triton_poi_fused_add_0.run(buf4, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_1.run(buf3, buf5, buf15, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0); del buf3 # reuse buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_1.run(buf4, buf6, buf16, 256, grid=grid(256), stream=stream0) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm] extern_kernels.bmm(buf5, buf6, out=buf7) buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out_6], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp] triton_poi_fused_clamp_div_exp_max_sub_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3.run(buf8, buf7, buf9, 64, grid=grid(64), stream=stream0) buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_7], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add] triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4.run(buf10, buf9, 256, grid=grid(256), stream=stream0) del buf9 buf11 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.add] triton_poi_fused_add_0.run(buf11, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 buf12 = buf5; del buf5 # reuse buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), 
torch.float32) # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.bmm, aten.transpose] triton_poi_fused_bmm_transpose_1.run(buf11, buf12, buf14, 256, grid=grid(256), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13) del buf12 return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class MultiHead(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super(MultiHead, self).__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) == value.size(-1) assert key.size(-2) == value.size(-2) query = self.lin_q(query) key = self.lin_k(key) value = self.lin_v(value) size = list(query.size())[:-2] 
out_channels_per_head = self.out_channels // self.heads query_size = size + [query.size(-2), self.heads, out_channels_per_head] query = query.view(*query_size).transpose(-2, -3) key_size = size + [key.size(-2), self.heads, out_channels_per_head] key = key.view(*key_size).transpose(-2, -3) value_size = size + [value.size(-2), self.heads, out_channels_per_head] value = value.view(*value_size).transpose(-2, -3) out = super(MultiHead, self).forward(query, key, value) out = out.transpose(-3, -2).contiguous() out = out.view(*(size + [query.size(-2), self.out_channels])) return out def __repr__(self): return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads, self.groups, self.dropout, self.bias) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
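restricted_softmax above is algebraically a softmax over the logits augmented with one constant `margin` logit (a null/sink entry), keeping only the original positions; a quick check of that reading, using only the function defined in this record:

import torch

src = torch.randn(2, 3, 5)
margin = 0
# Append one constant `margin` logit along the softmax dim, take a standard
# softmax, and drop the extra column: this matches restricted_softmax exactly
# (the clamp(max, 0) in the original is just the numerically stable shift).
aug = torch.cat([src, torch.full_like(src[..., :1], float(margin))], dim=-1)
ref = torch.softmax(aug, dim=-1)[..., :-1]
assert torch.allclose(restricted_softmax(src, margin=margin), ref, atol=1e-6)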
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2 + 64 * ((x1 + 4 * (x2 % 4)) // 16)), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) tl.store(out_ptr1 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = 0.0 tmp15 = triton_helpers.maximum(tmp13, tmp14) tmp16 = tmp2 - tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp11 = tmp10 * tmp8 tmp12 = triton_helpers.maximum(tmp9, tmp11) tmp14 = tmp13 * 
tmp8 tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = tmp16 * tmp8 tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = 0.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = tmp19 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp6 + tmp22 tl.store(out_ptr0 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0) del primals_4 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1) del primals_6 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2) del primals_8 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_0[grid(256)](buf4, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf3, buf5, buf15, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0) del buf3 buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf4, buf6, buf16, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(buf5, buf6, out=buf7) buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0 ) del buf6 triton_poi_fused_clamp_div_exp_max_sub_2[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64), torch.float32) triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3[grid(64)](buf8, buf7, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0 ) del buf8 triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4[grid(256)](buf10, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf11 = 
reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_0[grid(256)](buf11, primals_9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf12 = buf5 del buf5 buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_bmm_transpose_1[grid(256)](buf11, buf12, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0) del buf11 extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13) del buf12 return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. 
__name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super(Attention, self).__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class MultiHeadNew(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super(MultiHeadNew, self).__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def __repr__(self): return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format( self.__class__.__name__, self.in_channels, self.out_channels, self.heads, self.groups, self.dropout, self.bias) def forward(self, input_0, input_1, input_2): primals_4 = self.lin_q.weight primals_5 = self.lin_q.bias primals_6 = self.lin_k.weight primals_7 = self.lin_k.bias primals_8 = self.lin_v.weight primals_9 = self.lin_v.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
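An illustrative smoke test for the compiled wrapper above (not part of the generated artifact; requires a CUDA device), using the get_init_inputs()/get_inputs() configuration of this record:

import torch

# MultiHeadNew with in_channels=4, out_channels=4 and the default heads=1,
# groups=1; output should match buf13's (4, 4, 4, 4) reinterpretation.
if torch.cuda.is_available():
    torch.manual_seed(0)
    m = MultiHeadNew(in_channels=4, out_channels=4).cuda()
    q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    out = m(q, k, v)
    assert out.shape == (4, 4, 4, 4)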
CFF-Dream/pytorch_geometric
MultiHead
false
2,045
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import math import torch from torch import Tensor from torch.nn import Linear import torch.nn.functional as F from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) def restricted_softmax(src, dim=-1, margin=0): src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0) out = (src - src_max).exp() out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp()) return out class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super().__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) class Attention(torch.nn.Module): def __init__(self, dropout=0): super().__init__() self.dropout = dropout def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) assert key.size(-2) == value.size(-2) score = torch.matmul(query, key.transpose(-2, -1)) score = score / math.sqrt(key.size(-1)) score = restricted_softmax(score, dim=-1) score = F.dropout(score, p=self.dropout, training=self.training) return torch.matmul(score, value) def __repr__(self): return '{}(dropout={})'.format(self.__class__.__name__, self.dropout) class Model(Attention): def __init__(self, in_channels, out_channels, heads=1, groups=1, dropout=0, bias=True): super().__init__(dropout) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.groups = groups self.bias = bias assert in_channels % heads == 0 and out_channels % heads == 0 assert in_channels % groups == 0 and out_channels % groups == 0 assert max(groups, self.heads) % min(groups, self.heads) == 0 self.lin_q = Linear(in_channels, out_channels, groups, bias) self.lin_k = Linear(in_channels, out_channels, groups, bias) self.lin_v = Linear(in_channels, out_channels, groups, bias) self.reset_parameters() def reset_parameters(self): self.lin_q.reset_parameters() self.lin_k.reset_parameters() self.lin_v.reset_parameters() def forward(self, query, key, value): assert query.dim() == key.dim() == value.dim() >= 2 assert query.size(-1) == key.size(-1) == value.size(-1) asse # ... truncated (>4000 chars) for memory efficiency
RSoftmax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%permute, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = 
xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/v4/cv4nyn2kde7dd2c53ddahw4vtxyldln6pqt62jrliqindkf3sj5m.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # x_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 256, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) del buf0 return (reinterpret_tensor(buf1, (4, 64), (64, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class RSoftmax(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'radix': 4, 'groups': 1}]
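A small illustration (not part of the source record) of why the compiled buffers above are allocated as (4, 4, 1, 16): with radix=4 and groups=1, the eager view/transpose turns the (4, 4, 4, 4) input into exactly that layout before softmax over dim=1, and the max/sum loads at offsets 0, 16, 32, 48 in the fused kernels walk that radix axis.

import torch

x = torch.rand(4, 4, 4, 4)
batch, radix, groups = x.size(0), 4, 1
y = x.view(batch, groups, radix, -1).transpose(1, 2)
# Softmax runs over dim=1 (the radix axis); its stride is 16 elements,
# matching the 0/16/32/48 offsets reduced in triton_poi_fused__softmax_0/1.
assert y.shape == (4, 4, 1, 16)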
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 256, 1), torch. float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 16), (64, 16, 16, 1), torch.float32 ) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return reinterpret_tensor(buf1, (4, 64), (64, 1), 0), class RSoftmaxNew(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ALISCIFP/mmpose
RSoftmax
false
2,046
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """Radix Softmax module in ``SplitAttentionConv2d``. Args: radix (int): Radix of input. groups (int): Groups of input. """ def __init__(self, radix, groups): super().__init__() self.radix = radix self.groups = groups def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 1]
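A minimal cross-check sketch, not from the source: it compares the Inductor-compiled path against an eager re-implementation of the radix softmax. It assumes a CUDA device and that the RSoftmaxNew class above is importable from a module here hypothetically named rsoftmax_compiled.

import torch
import torch.nn.functional as F
from rsoftmax_compiled import RSoftmaxNew  # hypothetical module name


def eager_rsoftmax(x, radix=4, groups=1):
    # Mirrors the record's pytorch_code forward() for radix > 1.
    batch = x.size(0)
    x = x.view(batch, groups, radix, -1).transpose(1, 2)
    x = F.softmax(x, dim=1)
    return x.reshape(batch, -1)


x = torch.rand(4, 4, 4, 4, device='cuda')
out_compiled = RSoftmaxNew(radix=4, groups=1)(x)
out_eager = eager_rsoftmax(x)
# Both paths produce a (4, 64) tensor and should agree to float tolerance.
assert torch.allclose(out_compiled, out_eager, atol=1e-6)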
Linear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/bh/cbhpcgxjg3mwo4dulstw5ie26none2yzi5sysdzl34cu6pyah4fg.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.view] # Source node to ATen node mapping: # out_1 => add, view_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {}) triton_poi_fused_add_view_0 = async_compile.triton('triton_poi_fused_add_view_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = 
tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.view] stream0 = get_raw_stream(0) triton_poi_fused_add_view_0.run(buf2, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class Linear(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(Linear, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
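A hedged sketch (not in the source) of the groups > 1 branch, which the compiled graph above never exercises because get_init_inputs() leaves groups at its default of 1: it traces the reshape/transpose dance around the batched matmul so the shapes are visible.

import torch

groups, in_ch, out_ch = 2, 4, 4
weight = torch.rand(groups, in_ch // groups, out_ch // groups)
src = torch.rand(4, 4, 4, in_ch)

size = list(src.size())[:-1]
s = src.view(-1, groups, in_ch // groups).transpose(0, 1).contiguous()
out = torch.matmul(s, weight)            # (groups, N, out_ch // groups)
out = out.transpose(1, 0).contiguous()   # (N, groups, out_ch // groups)
out = out.view(*(size + [out_ch]))       # back to (4, 4, 4, out_ch)
assert out.shape == (4, 4, 4, 4)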
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import Tensor from torch.nn import Parameter import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 return buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class LinearNew(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super(LinearNew, self).__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CFF-Dream/pytorch_geometric
Linear
false
2,047
[ "MIT" ]
0
7c19ad74957409ee9e07314ce81524b3113b9c84
https://github.com/CFF-Dream/pytorch_geometric/tree/7c19ad74957409ee9e07314ce81524b3113b9c84
import math import torch from torch import Tensor from torch.nn import Linear from torch.nn import Parameter import torch.utils.data def uniform(size, tensor): bound = 1.0 / math.sqrt(size) if tensor is not None: tensor.data.uniform_(-bound, bound) def kaiming_uniform(tensor, fan, a): if tensor is not None: bound = math.sqrt(6 / ((1 + a ** 2) * fan)) tensor.data.uniform_(-bound, bound) class Model(torch.nn.Module): def __init__(self, in_channels, out_channels, groups=1, bias=True): super().__init__() assert in_channels % groups == 0 and out_channels % groups == 0 self.in_channels = in_channels self.out_channels = out_channels self.groups = groups self.weight = Parameter(Tensor(groups, in_channels // groups, out_channels // groups)) if bias: self.bias = Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5)) uniform(self.weight.size(1), self.bias) def forward(self, src): if self.groups > 1: size = list(src.size())[:-1] src = src.view(-1, self.groups, self.in_channels // self.groups) src = src.transpose(0, 1).contiguous() out = torch.matmul(src, self.weight) out = out.transpose(1, 0).contiguous() out = out.view(*(size + [self.out_channels])) else: out = torch.matmul(src, self.weight.squeeze(0)) if self.bias is not None: out += self.bias return out def __repr__(self): return '{}({}, {}, groups={}, bias={})'.format(self.__class__. __name__, self.in_channels, self.out_channels, self.groups, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
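A minimal verification sketch, not from the source, for the groups=1 path that the compiled call() covers: src is flattened to (64, 4), multiplied via extern_kernels.mm, and the bias is added by the fused Triton kernel. It assumes a CUDA device and that call is importable from a module here hypothetically named linear_compiled.

import torch
from linear_compiled import call  # hypothetical module name

weight = torch.empty(1, 4, 4, device='cuda').uniform_(-0.5, 0.5)
bias = torch.empty(4, device='cuda').uniform_(-0.5, 0.5)
src = torch.rand(4, 4, 4, 4, device='cuda')

# Eager reference: out = src @ weight.squeeze(0) + bias over the last dim.
out_eager = torch.matmul(src, weight.squeeze(0)) + bias

# call() clears and consumes its argument list, so pass clones.
out_compiled, _ = call([weight.clone(), src.clone(), bias.clone()])
assert torch.allclose(out_compiled, out_eager, atol=1e-6)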
ConvBlockINE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/d5/cd5m3j5auxbszyzvbqqxqrtrkrxq26jxju4ar5ny6zgmqowfs5hg.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] # Source node to ATen node mapping: # pad => copy # Graph fragment: # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_3, %slice_4), kwargs = {}) # %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %copy, 2, 1, 5), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %slice_scatter_default, 3, 1, 5), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_11, 3, 0, 1), kwargs = {}) # %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %slice_16, 3, 5, 6), kwargs = {}) triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-4) + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + (x4), tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float("nan") tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + ((-9) + x0 + (4*x1) + (16*x2)), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + ((-4) + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + (x4), tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + (x4), tmp62, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/w6/cw6our4iotwhi2wfr4hvczz23dzsphqyor2wfejso3djq53u3bto.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %slice_21, 2, 0, 1), kwargs = {}) # %slice_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_26, 2, 5, 6), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x3 = xindex tmp14 = tl.load(in_ptr0 + (x3), xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = (-4) + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + (36*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + ((-24) + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + (36*x2)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/wq/cwqax6gmmuxvbsug2hkzh7vbsu5as5cmm6jhtilsvpzfpbg5eqe6.py # Topologically Sorted Source Nodes: [x1, x1_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # x1 => convolution # x1_2 => add, rsqrt, var_mean # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%slice_scatter_default_5, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 16, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = 16.0 tmp22 = tmp20 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.store(out_ptr2 + (x3), tmp25, xmask) tl.store(out_ptr0 + (x3), tmp14, xmask) tl.store(out_ptr1 + (x3), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/mo/cmoomc5a3pt633qovhlidladgvdc76et445dajcmk2uradzxvd25.py # Topologically Sorted Source Nodes: [x1_2], Original ATen: [aten.repeat] # Source node to ATen node mapping: # x1_2 => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {}) triton_poi_fused_repeat_3 = async_compile.triton('triton_poi_fused_repeat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/f6/cf6ta7e6ecjnfzjbjk52wdkdjr2bbch2wsb2ppiuh7iyvmfthjz6.py # Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.copy] # Source node to ATen node mapping: # pad_1 => copy_5 # Graph fragment: # %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_30, %slice_31), kwargs = {}) # %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %copy_5, 2, 1, 5), kwargs = {}) # %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty_1, %slice_scatter_default_6, 3, 1, 5), kwargs = {}) triton_poi_fused_copy_4 = async_compile.triton('triton_poi_fused_copy_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_copy_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x4 = (xindex // 36) x2 = (xindex // 36) % 4 x6 = xindex tmp0 = x0 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp5 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x4)), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + (x4), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 - tmp14 tmp16 = tl.load(in_ptr2 + (x4), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp15 * tmp21 tmp23 = tl.load(in_ptr3 + (x4), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tl.load(in_ptr4 + (x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp10, tmp26, tmp27) tmp29 = tl.load(in_ptr5 + (x6), tmp5 & xmask, other=0.0) tmp30 = tl.where(tmp9, tmp28, tmp29) tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype) tmp32 = tl.where(tmp5, tmp30, tmp31) tmp33 = float("nan") tmp34 = tl.where(tmp5, tmp32, tmp33) tl.store(out_ptr0 + (x6), tmp34, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/jj/cjjx3peygg6ek2lf3wzrit7lzq3p7lepppotpxslfrcvc3qigmev.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %slice_scatter_default_8 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %slice_40, 3, 0, 1), kwargs = {}) # %slice_scatter_default_9 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_8, %slice_46, 3, 5, 6), kwargs = {}) # %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %slice_52, 2, 0, 1), kwargs = {}) triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x3 = (xindex // 6) x4 = xindex tmp30 = tl.load(in_ptr0 + (x4), xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tl.full([1], 5, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp5 & tmp2 tmp7 = (-4) + x0 tmp8 = tmp7 < tmp1 tmp9 = tmp8 & tmp6 tmp10 = tl.load(in_ptr0 + (28 + (6*x3)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (20 + x4), tmp6 & xmask, other=0.0) tmp12 = tl.where(tmp8, tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp6, tmp12, tmp13) tmp15 = tmp3 < tmp1 tmp16 = tmp15 & tmp2 tmp17 = tl.load(in_ptr0 + (28 + (6*x3)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (24 + x4), tmp2 & xmask, other=0.0) tmp19 = tl.where(tmp15, tmp17, tmp18) tmp20 = tl.where(tmp5, tmp14, tmp19) tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp2, tmp20, tmp21) tmp23 = tmp8 & tmp5 tmp24 = tl.load(in_ptr0 + (4 + (6*x3)), tmp23 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tl.load(in_ptr0 + ((-4) + x4), tmp5 & xmask, other=0.0) tmp26 = tl.where(tmp8, tmp24, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp5, tmp26, tmp27) tmp29 = tl.load(in_ptr0 + (4 + (6*x3)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.where(tmp15, tmp29, tmp30) tmp32 = tl.where(tmp5, tmp28, tmp31) tmp33 = tl.where(tmp2, tmp22, tmp32) tl.store(out_ptr0 + (x4), tmp33, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/v4/cv4iuanrzw7zmz6wm4wv4b4dlm2viyhtglso5ivforh46zglspnt.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %slice_scatter_default_11 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %slice_58, 2, 5, 6), kwargs = {}) triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x3 = xindex tmp4 = tl.load(in_ptr0 + (x3), xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (6 + x0 + (36*x2)), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/fi/cfispebfr3n4jaxzjla4s2o3ulk7nf6tdy6tawo72eedzbp37ef7.py # Topologically Sorted Source Nodes: [x1_3, out], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit] # Source node to ATen node mapping: # out => add_2, add_3, mul_2, mul_3, repeat_2, rsqrt_1, sub_1, var_mean_1 # x1_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%slice_scatter_default_11, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %unsqueeze_5), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_7), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_repeat_7 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_repeat_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused__native_batch_norm_legit_convolution_repeat_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_repeat_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0 % 4), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 16.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask) tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp27, xmask) tl.store(out_ptr1 + (x0), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.copy] stream0 = get_raw_stream(0) triton_poi_fused_copy_0.run(primals_3, buf0, buf1, 576, grid=grid(576), stream=stream0) del primals_3 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf1, buf2, 576, grid=grid(576), stream=stream0) # 
Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x1, x1_2], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_2.run(buf4, primals_2, buf6, buf7, buf9, 16, 16, grid=grid(16), stream=stream0) del primals_2 buf5 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x1_2], Original ATen: [aten.repeat] triton_poi_fused_repeat_3.run(primals_4, buf5, 16, grid=grid(16), stream=stream0) del primals_4 buf10 = buf1; del buf1 # reuse buf11 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.copy] triton_poi_fused_copy_4.run(buf4, buf6, buf7, buf5, primals_5, buf10, buf11, 576, grid=grid(576), stream=stream0) del primals_5 buf12 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(buf11, buf12, 576, grid=grid(576), stream=stream0) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(buf12, buf13, 576, grid=grid(576), stream=stream0) del buf12 # Topologically Sorted Source Nodes: [x1_3], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = reinterpret_tensor(buf7, (16, ), (1, ), 0); del buf7 # reuse buf15 = buf14; del buf14 # reuse buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf21 = empty_strided_cuda((1, 16, 4, 4), (256, 16, 4, 1), torch.float32) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x1_3, out], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_repeat_7.run(buf15, primals_8, primals_7, primals_9, buf16, buf17, buf21, buf20, 16, 16, grid=grid(16), stream=stream0) del primals_7 del primals_8 del primals_9 return (reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_6, buf2, buf4, buf5, reinterpret_tensor(buf9, (16, ), (1, ), 0), buf13, buf15, buf16, reinterpret_tensor(buf20, (16, ), (1, ), 0), reinterpret_tensor(buf17, (1, 16, 1, 1), (16, 1, 1, 1), 0), reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import init as init class ConvBlockINE(nn.Module): def __init__(self, in_ch, out_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.InstanceNorm2d(out_ch, affine=True) self.norm2 = nn.InstanceNorm2d(out_ch, affine=True) def forward(self, x, g=None, b=None): x1 = self.conv1(x) x1 = self.act(x1) x1 = self.norm1(x1) x1 = self.conv2(x1) x1 = self.act(x1) out = self.norm2(x1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x0 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tmp0 >= tmp4 tmp8 = tmp0 < tmp1 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp6 tmp11 = x1 tmp12 = tmp11 >= tmp4 tmp13 = tmp11 < tmp1 tmp14 = tmp12 & tmp13 tmp15 = tmp14 & tmp10 tmp16 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp15 & xmask, other=0.0) tmp17 = tl.load(in_ptr1 + x4, tmp10 & xmask, other=0.0) tmp18 = tl.where(tmp14, tmp16, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp10, tmp18, tmp19) tmp21 = float('nan') tmp22 = tl.where(tmp9, tmp20, tmp21) tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype) tmp24 = tl.where(tmp6, tmp22, tmp23) tmp25 = tmp3 >= tmp4 tmp26 = tmp3 < tmp1 tmp27 = tmp25 & tmp26 tmp28 = tmp27 & tmp2 tmp29 = tmp14 & tmp28 tmp30 = tl.load(in_ptr0 + (-9 + x0 + 4 * x1 + 16 * x2), tmp29 & xmask, other=0.0) tmp31 = tl.load(in_ptr1 + (-4 + x4), tmp28 & xmask, other=0.0) tmp32 = tl.where(tmp14, tmp30, tmp31) tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype) tmp34 = tl.where(tmp28, tmp32, tmp33) tmp35 = tl.where(tmp27, tmp34, tmp21) tmp36 = tl.where(tmp5, tmp24, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp2, tmp36, tmp37) tmp39 = tmp0 < tmp4 tmp40 = 4 + x0 tmp41 = tmp40 >= tmp4 tmp42 = tmp40 < tmp1 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp39 tmp45 = tmp14 & tmp44 tmp46 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp45 & xmask, other=0.0) tmp47 = tl.load(in_ptr1 + (4 + x4), tmp44 & xmask, other=0.0) tmp48 = tl.where(tmp14, tmp46, tmp47) tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp44, tmp48, tmp49) tmp51 = tl.where(tmp43, tmp50, tmp21) tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp39, tmp51, tmp52) tmp54 = tmp14 & tmp9 tmp55 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp54 & xmask, other=0.0) tmp56 = tl.load(in_ptr1 + x4, tmp9 & xmask, other=0.0) tmp57 = tl.where(tmp14, tmp55, tmp56) tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype) tmp59 = tl.where(tmp9, tmp57, tmp58) tmp60 = tl.where(tmp9, tmp59, tmp21) tmp61 = tl.where(tmp39, tmp53, tmp60) tmp62 = tl.where(tmp2, tmp38, tmp61) tl.store(out_ptr0 + x4, tmp62, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp14 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = -4 + x1 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = 
tmp5 & tmp2 tmp7 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tl.load(in_ptr0 + (-24 + x3), tmp2 & xmask, other=0.0) tmp9 = tl.where(tmp5, tmp7, tmp8) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tmp0 < tmp4 tmp13 = tl.load(in_ptr0 + (24 + x0 + 36 * x2), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp12, tmp13, tmp14) tmp16 = tl.where(tmp2, tmp11, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_2(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 16, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = 16.0 tmp22 = tmp20 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.store(out_ptr2 + x3, tmp25, xmask) tl.store(out_ptr0 + x3, tmp14, xmask) tl.store(out_ptr1 + x3, tmp20, xmask) @triton.jit def triton_poi_fused_repeat_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 4, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_copy_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x4 = xindex // 36 x2 = xindex // 36 % 4 x6 = xindex tmp0 = x0 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp9 & tmp5 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + x4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 - tmp14 tmp16 = tl.load(in_ptr2 + x4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp17 = 16.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tmp22 = tmp15 * tmp21 tmp23 = tl.load(in_ptr3 + x4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp22 * tmp23 tmp25 = tl.load(in_ptr4 + x2, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp10, tmp26, tmp27) tmp29 = 
tl.load(in_ptr5 + x6, tmp5 & xmask, other=0.0) tmp30 = tl.where(tmp9, tmp28, tmp29) tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype) tmp32 = tl.where(tmp5, tmp30, tmp31) tmp33 = float('nan') tmp34 = tl.where(tmp5, tmp32, tmp33) tl.store(out_ptr0 + x6, tmp34, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x3 = xindex // 6 x4 = xindex tmp30 = tl.load(in_ptr0 + x4, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x0 tmp4 = tl.full([1], 5, tl.int64) tmp5 = tmp3 >= tmp4 tmp6 = tmp5 & tmp2 tmp7 = -4 + x0 tmp8 = tmp7 < tmp1 tmp9 = tmp8 & tmp6 tmp10 = tl.load(in_ptr0 + (28 + 6 * x3), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tl.load(in_ptr0 + (20 + x4), tmp6 & xmask, other=0.0) tmp12 = tl.where(tmp8, tmp10, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp6, tmp12, tmp13) tmp15 = tmp3 < tmp1 tmp16 = tmp15 & tmp2 tmp17 = tl.load(in_ptr0 + (28 + 6 * x3), tmp16 & xmask, eviction_policy ='evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (24 + x4), tmp2 & xmask, other=0.0) tmp19 = tl.where(tmp15, tmp17, tmp18) tmp20 = tl.where(tmp5, tmp14, tmp19) tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype) tmp22 = tl.where(tmp2, tmp20, tmp21) tmp23 = tmp8 & tmp5 tmp24 = tl.load(in_ptr0 + (4 + 6 * x3), tmp23 & xmask, eviction_policy= 'evict_last', other=0.0) tmp25 = tl.load(in_ptr0 + (-4 + x4), tmp5 & xmask, other=0.0) tmp26 = tl.where(tmp8, tmp24, tmp25) tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype) tmp28 = tl.where(tmp5, tmp26, tmp27) tmp29 = tl.load(in_ptr0 + (4 + 6 * x3), tmp15 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tl.where(tmp15, tmp29, tmp30) tmp32 = tl.where(tmp5, tmp28, tmp31) tmp33 = tl.where(tmp2, tmp22, tmp32) tl.store(out_ptr0 + x4, tmp33, xmask) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex tmp4 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.load(in_ptr0 + (6 + x0 + 36 * x2), tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.where(tmp2, tmp3, tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_repeat_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0 % 4, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 16, tl.int32) 
tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 16.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(576)](primals_3, buf0, buf1, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = buf0 del buf0 triton_poi_fused_1[grid(576)](buf1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf9 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_convolution_2[grid(16)](buf4, primals_2, buf6, buf7, buf9, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 buf5 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_3[grid(16)](primals_4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf10 = buf1 del buf1 buf11 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32 ) triton_poi_fused_copy_4[grid(576)](buf4, buf6, buf7, buf5, primals_5, buf10, buf11, 576, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_5 buf12 = buf10 del buf10 triton_poi_fused_5[grid(576)](buf11, buf12, 576, XBLOCK=256, num_warps=4, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused_6[grid(576)](buf12, buf13, 576, XBLOCK=256, num_warps=4, num_stages=1) del buf12 buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = reinterpret_tensor(buf7, (16,), (1,), 0) del buf7 buf15 = buf14 del buf14 buf17 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf21 = empty_strided_cuda((1, 16, 4, 4), (256, 16, 4, 1), torch. float32) buf20 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. 
float32) triton_per_fused__native_batch_norm_legit_convolution_repeat_7[grid(16) ](buf15, primals_8, primals_7, primals_9, buf16, buf17, buf21, buf20, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_7 del primals_8 del primals_9 return reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_6, buf2, buf4, buf5, reinterpret_tensor(buf9, (16,), (1,), 0), buf13, buf15, buf16, reinterpret_tensor(buf20, (16 ,), (1,), 0), reinterpret_tensor(buf17, (1, 16, 1, 1), (16, 1, 1, 1), 0 ), reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0) class ConvBlockINENew(nn.Module): def __init__(self, in_ch, out_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.InstanceNorm2d(out_ch, affine=True) self.norm2 = nn.InstanceNorm2d(out_ch, affine=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_4 = self.conv2.bias primals_5 = self.norm1.weight primals_7 = self.norm1.bias primals_8 = self.norm2.weight primals_9 = self.norm2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
BaekduChoi/Halftoning_v2
ConvBlockINE
false
2048
[ "BSD-3-Clause" ]
0
fdb7040e1a4044f23ef9c92757bbb90c23685afe
https://github.com/BaekduChoi/Halftoning_v2/tree/fdb7040e1a4044f23ef9c92757bbb90c23685afe
import torch from torch import nn from torch.nn import init as init class Model(nn.Module): def __init__(self, in_ch, out_ch, act='relu', ksize=3): super().__init__() padding = (ksize - 1) // 2 if act == 'lrelu': self.act = nn.LeakyReLU(0.2, True) else: self.act = nn.ReLU(True) self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=ksize, padding= padding, padding_mode='circular') self.norm1 = nn.InstanceNorm2d(out_ch, affine=True) self.norm2 = nn.InstanceNorm2d(out_ch, affine=True) def forward(self, x, g=None, b=None): x1 = self.conv1(x) x1 = self.act(x1) x1 = self.norm1(x1) x1 = self.conv2(x1) x1 = self.act(x1) out = self.norm2(x1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
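A minimal usage sketch, not part of the original record: it drives the reference Model above through the record's own get_inputs/get_init_inputs helpers, and the function name run_reference_model is an illustrative assumption.

def run_reference_model():
    # get_init_inputs() above returns [4, 4] -> Model(in_ch=4, out_ch=4)
    model = Model(*get_init_inputs())
    x, = get_inputs()                  # single (4, 4, 4, 4) input tensor
    out = model(x)                     # circular-padded convs + InstanceNorm + ReLU
    assert out.shape == x.shape        # ksize=3 with padding=1 keeps H and W
    return out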
MSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/7p/c7po76azmxaenhqdhxmjkwmmyrh5jpr4p5zydgfurvfy3q5qm6gp.py # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.mse_loss, aten.mul] # Source node to ATen node mapping: # loss => mean, pow_1, sub # mul => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) 
@triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.mse_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class MSELoss(nn.Module): """MSE loss for coordinate regression.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.mse_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K Args: output (torch.Tensor[N, K, 2]): Output regression. target (torch.Tensor[N, K, 2]): Target regression. target_weight (torch.Tensor[N, K, 2]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MSELossNew(nn.Module): """MSE loss for coordinate regression.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.mse_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ALISCIFP/mmpose
MSELoss
false
2049
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """MSE loss for coordinate regression.""" def __init__(self, use_target_weight=False, loss_weight=1.0): super().__init__() self.criterion = F.mse_loss self.use_target_weight = use_target_weight self.loss_weight = loss_weight def forward(self, output, target, target_weight=None): """Forward function. Note: - batch_size: N - num_keypoints: K Args: output (torch.Tensor[N, K, 2]): Output regression. target (torch.Tensor[N, K, 2]): Target regression. target_weight (torch.Tensor[N, K, 2]): Weights across different joint types. """ if self.use_target_weight: assert target_weight is not None loss = self.criterion(output * target_weight, target * target_weight) else: loss = self.criterion(output, target) return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
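A minimal usage sketch, not part of the original record, for the reference Model above; it exercises the default use_target_weight=False path, and run_reference_loss is an assumed name.

def run_reference_loss():
    model = Model(*get_init_inputs())   # no constructor arguments
    output, target = get_inputs()       # two (4, 4, 4, 4) tensors
    loss = model(output, target)        # F.mse_loss reduced to a mean, times loss_weight
    assert loss.dim() == 0              # scalar result
    return loss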
GlobalAveragePooling
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [outs], Original ATen: [aten.mean] # Source node to ATen node mapping: # outs => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [outs], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf1, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GlobalAveragePooling(nn.Module): """Global Average Pooling neck. Note that we use `view` to remove extra channel after pooling. We do not use `squeeze` as it will also remove the batch dimension when the tensor has a batch dimension of size 1, which can lead to unexpected errors. """ def __init__(self): super().__init__() self.gap = nn.AdaptiveAvgPool2d((1, 1)) def init_weights(self): pass def forward(self, inputs): if isinstance(inputs, tuple): outs = tuple([self.gap(x) for x in inputs]) outs = tuple([out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) elif isinstance(inputs, list): outs = [self.gap(x) for x in inputs] outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)] elif isinstance(inputs, torch.Tensor): outs = self.gap(inputs) outs = outs.view(inputs.size(0), -1) else: raise TypeError('neck inputs should be tuple or torch.tensor') return outs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0), class GlobalAveragePoolingNew(nn.Module): """Global Average Pooling neck. Note that we use `view` to remove extra channel after pooling. We do not use `squeeze` as it will also remove the batch dimension when the tensor has a batch dimension of size 1, which can lead to unexpected errors. """ def __init__(self): super().__init__() self.gap = nn.AdaptiveAvgPool2d((1, 1)) def init_weights(self): pass def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ALISCIFP/mmpose
GlobalAveragePooling
false
2050
[ "Apache-2.0" ]
0
2433e3dbcc44baa2253e2a7c748ba0216937933e
https://github.com/ALISCIFP/mmpose/tree/2433e3dbcc44baa2253e2a7c748ba0216937933e
import torch import torch.nn as nn class Model(nn.Module): """Global Average Pooling neck. Note that we use `view` to remove extra channel after pooling. We do not use `squeeze` as it will also remove the batch dimension when the tensor has a batch dimension of size 1, which can lead to unexpected errors. """ def __init__(self): super().__init__() self.gap = nn.AdaptiveAvgPool2d((1, 1)) def init_weights(self): pass def forward(self, inputs): if isinstance(inputs, tuple): outs = tuple([self.gap(x) for x in inputs]) outs = tuple([out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) elif isinstance(inputs, list): outs = [self.gap(x) for x in inputs] outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)] elif isinstance(inputs, torch.Tensor): outs = self.gap(inputs) outs = outs.view(inputs.size(0), -1) else: raise TypeError('neck inputs should be tuple or torch.tensor') return outs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
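A minimal usage sketch, not part of the original record, showing the torch.Tensor branch of the reference Model above; run_reference_gap is an assumed name.

def run_reference_gap():
    model = Model(*get_init_inputs())   # pooling neck takes no arguments
    x, = get_inputs()                   # (4, 4, 4, 4) feature map
    out = model(x)                      # AdaptiveAvgPool2d((1, 1)) then view
    assert out.shape == (4, 4)          # (batch, channels) after flattening
    return out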
InvConvNear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/eu/ceuywbmhtjrjbdgiqzjvxg6kppccq4gu554vmz5lt2nvjjmly3vh.py # Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.eq] # Source node to ATen node mapping: # logdet => eq # Graph fragment: # %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%getitem, -1.0), kwargs = {}) triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = -1.0 tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp3, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_7/inductor_cache/k6/ck6rcj2aiwjvx7ctsrajcnweqtvvjefeww6f2svf4tye2ixq4d65.py # Topologically Sorted Source Nodes: [x_len, logdet, mul_1, logdet_1], Original ATen: [aten.mul, aten.scalar_tensor, aten.where] # Source node to ATen node mapping: # logdet => full_default_1, where # logdet_1 => mul_2 # mul_1 => mul_1 # x_len => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 4.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %full_default), kwargs = {}) triton_poi_fused_mul_scalar_tensor_where_1 = async_compile.triton('triton_poi_fused_mul_scalar_tensor_where_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_scalar_tensor_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_scalar_tensor_where_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)).to(tl.int1) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = float("nan") tmp5 = tl.where(tmp1, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 4.0 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/y5/cy55qfc456xtrn5xgyv4h2r4osqtkteize5kc2wocd7ecb52e6fh.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution] # Source node to ATen node mapping: # z => convolution # Graph fragment: # %convolution : [num_users=1] = 
call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %view_2, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6q/c6qeh4gh27f25datiqvo65rhtoulr47ssh6rvmydde7ujxxgpvwu.py # Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.mul] # Source node to ATen node mapping: # z_2 => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, 1), kwargs = {}) triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (1, 4)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [logdet], Original ATen: [aten._linalg_slogdet] buf0 = torch.ops.aten._linalg_slogdet.default(primals_2) buf1 = buf0[0] buf2 = buf0[1] buf3 = buf0[2] buf4 = buf0[3] del buf0 buf5 = empty_strided_cuda((), (), torch.bool) # Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.eq] stream0 = get_raw_stream(0) triton_poi_fused_eq_0.run(buf1, buf5, 1, grid=grid(1), stream=stream0) del buf1 buf6 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [x_len, logdet, mul_1, logdet_1], Original ATen: [aten.mul, aten.scalar_tensor, aten.where] triton_poi_fused_mul_scalar_tensor_where_1.run(buf5, buf2, buf6, 4, grid=grid(4), stream=stream0) del buf2 buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(primals_2, buf7, 4, 4, grid=grid(4, 4), stream=stream0) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 4, 1), 0), buf7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 1, 4), (16, 4, 4, 1)) del buf7 buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.mul] triton_poi_fused_mul_3.run(buf9, 64, grid=grid(64), stream=stream0) return (buf9, buf6, reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 8, 1), 0), buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 1, 1), (1, 4, 4, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (1, 4), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F import torch.utils.data class InvConvNear(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def forward(self, x, x_mask=None, reverse=False, **kwargs): b, c, t = x.size() assert c % self.n_split == 0 if x_mask is None: x_mask = 1 x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t else: x_len = torch.sum(x_mask, [1, 2]) x = x.view(b, 2, c // self.n_split, self.n_split // 2, t) x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t) if reverse: if hasattr(self, 'weight_inv'): weight = self.weight_inv else: weight = torch.inverse(self.weight.float()) logdet = None else: weight = self.weight if self.no_jacobian: logdet = 0 else: logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len weight = weight.view(self.n_split, self.n_split, 1, 1) z = F.conv2d(x, weight) z = z.view(b, 2, self.n_split // 2, c // self.n_split, t) z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask return z, logdet def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = -1.0 tmp3 = tmp1 == tmp2 tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp3, None) @triton.jit def triton_poi_fused_mul_scalar_tensor_where_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0).to(tl.int1) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = float('nan') tmp5 = tl.where(tmp1, tmp4, tmp3) tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = 4.0 tmp9 = tmp7 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_mul_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (1, 4)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._linalg_slogdet.default(primals_2) buf1 = buf0[0] buf2 = buf0[1] buf3 = buf0[2] buf4 = buf0[3] del buf0 buf5 = empty_strided_cuda((), (), torch.bool) get_raw_stream(0) triton_poi_fused_eq_0[grid(1)](buf1, buf5, 1, XBLOCK=1, num_warps=1, num_stages=1) del buf1 buf6 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_scalar_tensor_where_1[grid(4)](buf5, buf2, buf6, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf2 buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_convolution_2[grid(4, 4)](primals_2, buf7, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf8 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 4, 1), 0), buf7, stride=(1, 1), padding=(0, 0 ), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 1, 4), (16, 4, 4, 1)) del buf7 buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0) del buf8 triton_poi_fused_mul_3[grid(64)](buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf9, buf6, reinterpret_tensor(primals_1, (4, 4, 1, 4), (16, 4, 8, 1), 0), 
buf3, buf4, buf5, reinterpret_tensor(primals_2, (4, 4, 1, 1), (1, 4, 4, 4), 0) class InvConvNearNew(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
AndreHe02/glow-tts
InvConvNear
false
2051
[ "MIT" ]
0
683f68f17790f2f46c23e9d3eadbcac352d82e2b
https://github.com/AndreHe02/glow-tts/tree/683f68f17790f2f46c23e9d3eadbcac352d82e2b
import torch from torch import nn from torch.nn import functional as F import torch.utils.data class Model(nn.Module): def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): super().__init__() assert n_split % 2 == 0 self.channels = channels self.n_split = n_split self.no_jacobian = no_jacobian w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split). normal_())[0] if torch.det(w_init) < 0: w_init[:, 0] = -1 * w_init[:, 0] self.weight = nn.Parameter(w_init) def forward(self, x, x_mask=None, reverse=False, **kwargs): b, c, t = x.size() assert c % self.n_split == 0 if x_mask is None: x_mask = 1 x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t else: x_len = torch.sum(x_mask, [1, 2]) x = x.view(b, 2, c // self.n_split, self.n_split // 2, t) x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t) if reverse: if hasattr(self, 'weight_inv'): weight = self.weight_inv else: weight = torch.inverse(self.weight.float()) logdet = None else: weight = self.weight if self.no_jacobian: logdet = 0 else: logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len weight = weight.view(self.n_split, self.n_split, 1, 1) z = F.conv2d(x, weight) z = z.view(b, 2, self.n_split // 2, c // self.n_split, t) z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask return z, logdet def store_inverse(self): self.weight_inv = torch.inverse(self.weight.float()) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
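A minimal usage sketch, not part of the original record, for the reference Model above in its forward (reverse=False) direction; run_reference_invconv is an assumed name.

def run_reference_invconv():
    model = Model(*get_init_inputs())   # channels=4; n_split defaults to 4
    x, = get_inputs()                   # (4, 4, 4) input: (batch, channels, time)
    z, logdet = model(x)                # invertible 1x1 conv and its log-determinant
    assert z.shape == x.shape           # volume-preserving reshuffle and conv
    assert logdet.shape == (4,)         # one log-det term per batch element
    return z, logdet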
InterWeightedBCEWithLogits
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ym/cymwvmkwrvcnjt5nt3y4cvn7zwvwctmhlghqmizfeficvct6vlpz.py # Topologically Sorted Source Nodes: [loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2 # loss_1 => mean # loss_cls => mul_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, loss_1, loss_cls], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
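# Hedged sanity check (illustrative, not part of the generated file): the
# persistent-reduction kernel above computes
# (1 - y) * x - (min(0, x) - log1p(exp(-|x|))) per element before averaging.
# Since x - min(0, x) == max(x, 0), this is exactly the numerically stable
# form max(x, 0) - x * y + log1p(exp(-|x|)) used by
# F.binary_cross_entropy_with_logits.
import torch
import torch.nn.functional as F

x = torch.randn(256)  # logits
y = torch.rand(256)   # targets in [0, 1]
fused = (1 - y) * x - (torch.minimum(torch.zeros_like(x), x) -
                       torch.log1p(torch.exp(-x.abs())))
ref = F.binary_cross_entropy_with_logits(x, y, reduction='none')
print(torch.allclose(fused, ref, atol=1e-6))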
import torch from torch import Tensor import torch.nn as nn from typing import Optional from typing import Any import torch.nn.functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, pos_weight=None): """Calculate the binary CrossEntropy loss with logits. Args: pred (torch.Tensor): The prediction with shape (N, \\*). label (torch.Tensor): The gt label with shape (N, \\*). weight (torch.Tensor, optional): Element-wise weight of loss with shape (N, ). Defaults to None. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. pos_weight (torch.Tensor, optional): The positive weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ assert pred.dim() == label.dim() if class_weight is not None: N = pred.size()[0] class_weight = class_weight.repeat(N, 1) loss = F.binary_cross_entropy_with_logits(pred, label, weight= class_weight, pos_weight=pos_weight, reduction='none') if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class InterWeightedBCEWithLogits(nn.Module): def __init__(self, reduction: 'str'='mean', loss_weight: 'float'=1.0 ) ->None: super(InterWeightedBCEWithLogits, self).__init__() self.reduction = reduction self.loss_weight = loss_weight self.register_buffer('class_weight', None) def receive_data_dist_info(self, num_pos_neg: 'Tensor') ->None: """Weight for each class is sqrt(n_c / (n_dominant + n_total))""" num_pos = num_pos_neg[0] num_dominant = num_pos.max() class_weight = torch.sqrt(num_pos / (num_dominant + num_pos.sum())) class_weight /= class_weight.sum() self.class_weight = class_weight def forward(self, cls_score: 'Tensor', label: 'Tensor', weight: 'Optional[Tensor]'=None, avg_factor: 'Optional[float]'=None, reduction_override: 'Optional[str]'=None, **kwargs: Any ) ->torch.Tensor: assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. 
reduction) loss_cls = self.loss_weight * binary_cross_entropy(cls_score, label, weight, class_weight=self.class_weight, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
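# Minimal usage sketch for the module above (CPU; inputs from this record's
# get_inputs). With no class_weight registered and loss_weight=1.0, the
# result equals plain mean BCE-with-logits.
import torch
import torch.nn.functional as F

criterion = InterWeightedBCEWithLogits(reduction='mean', loss_weight=1.0)
cls_score, label = get_inputs()
loss = criterion(cls_score, label)
ref = F.binary_cross_entropy_with_logits(cls_score, label, reduction='mean')
print(torch.allclose(loss, ref))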
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import Tensor import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tmp18 = tmp17 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_mul_0[grid(1)]( buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, pos_weight=None): """Calculate the binary CrossEntropy loss with logits. Args: pred (torch.Tensor): The prediction with shape (N, \\*). label (torch.Tensor): The gt label with shape (N, \\*). weight (torch.Tensor, optional): Element-wise weight of loss with shape (N, ). Defaults to None. reduction (str): The method used to reduce the loss. Options are "none", "mean" and "sum". If reduction is 'none' , loss is same shape as pred and label. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. 
Defaults to None. class_weight (torch.Tensor, optional): The weight for each class with shape (C), C is the number of classes. Default None. pos_weight (torch.Tensor, optional): The positive weight for each class with shape (C), C is the number of classes. Default None. Returns: torch.Tensor: The calculated loss """ assert pred.dim() == label.dim() if class_weight is not None: N = pred.size()[0] class_weight = class_weight.repeat(N, 1) loss = F.binary_cross_entropy_with_logits(pred, label, weight= class_weight, pos_weight=pos_weight, reduction='none') if weight is not None: assert weight.dim() == 1 weight = weight.float() if pred.dim() > 1: weight = weight.reshape(-1, 1) loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor) return loss class InterWeightedBCEWithLogitsNew(nn.Module): def __init__(self, reduction: 'str'='mean', loss_weight: 'float'=1.0 ) ->None: super(InterWeightedBCEWithLogitsNew, self).__init__() self.reduction = reduction self.loss_weight = loss_weight self.register_buffer('class_weight', None) def receive_data_dist_info(self, num_pos_neg: 'Tensor') ->None: """Weight for each class is sqrt(n_c / (n_dominant + n_total))""" num_pos = num_pos_neg[0] num_dominant = num_pos.max() class_weight = torch.sqrt(num_pos / (num_dominant + num_pos.sum())) class_weight /= class_weight.sum() self.class_weight = class_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
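# Hedged equivalence check (assumes a CUDA device; not part of the generated
# file). The compiled graph was traced with weight/class_weight absent and
# mean reduction, so for contiguous (4, 4, 4, 4) inputs it should agree with
# eager BCE-with-logits.
import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    compiled_loss = InterWeightedBCEWithLogitsNew()(pred, target)
    eager_loss = F.binary_cross_entropy_with_logits(pred, target,
        reduction='mean')
    print(torch.allclose(compiled_loss, eager_loss, atol=1e-6))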
CAMP-eXplain-AI/imba-explain
InterWeightedBCEWithLogits
false
2052
[ "MIT" ]
0
e41b4ca5de63955cb0e925aad9599f38c5a3e973
https://github.com/CAMP-eXplain-AI/imba-explain/tree/e41b4ca5de63955cb0e925aad9599f38c5a3e973
import torch
from torch import Tensor
import torch.nn as nn
from typing import Optional
from typing import Any
import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def binary_cross_entropy(pred, label, weight=None, reduction='mean',
    avg_factor=None, class_weight=None, pos_weight=None):
    """Calculate the binary CrossEntropy loss with logits.

    Args:
        pred (torch.Tensor): The prediction with shape (N, \\*).
        label (torch.Tensor): The gt label with shape (N, \\*).
        weight (torch.Tensor, optional): Element-wise weight of loss with
            shape (N, ). Defaults to None.
        reduction (str): The method used to reduce the loss.
            Options are "none", "mean" and "sum". If reduction is 'none' ,
            loss is same shape as pred and label. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        class_weight (torch.Tensor, optional): The weight for each class with
            shape (C), C is the number of classes. Default None.
        pos_weight (torch.Tensor, optional): The positive weight for each
            class with shape (C), C is the number of classes. Default None.

    Returns:
        torch.Tensor: The calculated loss
    """
    assert pred.dim() == label.dim()
    if class_weight is not None:
        N = pred.size()[0]
        class_weight = class_weight.repeat(N, 1)
    loss = F.binary_cross_entropy_with_logits(pred, label, weight=
        class_weight, pos_weight=pos_weight, reduction='none')
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction,
        avg_factor=avg_factor)
    return loss


class Model(nn.Module):

    def __init__(self, reduction: 'str'='mean', loss_weight: 'float'=1.0
        ) ->None:
        super().__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.register_buffer('class_weight', None)

    def receive_data_dist_info(self, num_pos_neg: 'Tensor') ->None:
        """Weight for each class is sqrt(n_c / (n_dominant + n_total))"""
        num_pos = num_pos_neg[0]
        num_dominant = num_pos.max()
        class_weight = torch.sqrt(num_pos / (num_dominant + num_pos.sum()))
        class_weight /= class_weight.sum()
        self.class_weight = class_weight

    def forward(self, cls_score: 'Tensor', label: 'Tensor', weight:
        'Optional[Tensor]'=None, avg_factor: 'Optional[float]'=None,
        reduction_override: 'Optional[str]'=None, **kwargs: Any
        ) ->torch.Tensor:
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        loss_cls = self.loss_weight * binary_cross_entropy(cls_score,
            label, weight, class_weight=self.class_weight, reduction=
            reduction, avg_factor=avg_factor, **kwargs)
        return loss_cls


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
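# Illustrative usage of the Model above, exercising reduction_override: the
# per-element losses should average to the default 'mean' result, since
# loss_weight is 1.0.
import torch

m = Model()
pred, label = get_inputs()
per_elem = m(pred, label, reduction_override='none')
mean_loss = m(pred, label)
print(per_elem.shape, torch.allclose(per_elem.mean(), mean_loss))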