Dataset schema (field, type, and the min/max values reported by the dataset viewer):

field                  type             min     max
entry_point            string length    1       65
original_triton_code   string length    4.5k    619k
python_code            string length    208     60.9k
triton_code            string length    1.15k   275k
repo_name              string length    7       115
module_name            string length    1       65
synthetic              bool             1 class
uuid                   int64            0       18.5k
licenses               sequence length  1       6
stars                  int64            0       19.8k
sha                    string length    40      40
repo_link              string length    72      180
pytorch_code           string length    200     4.05k
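Each record pairs a PyTorch module (pytorch_code / python_code) with the Triton code TorchInductor generates for it (original_triton_code is the raw AOT dump, triton_code a cleaned version). A minimal sketch of reading one record with the Hugging Face `datasets` library follows; the dataset path "user/pytorch-triton-pairs" is a hypothetical placeholder, since the dump does not name the dataset.

from datasets import load_dataset

# Hypothetical Hub path; substitute the real dataset name.
ds = load_dataset('user/pytorch-triton-pairs', split='train')
row = ds[0]
print(row['entry_point'], row['repo_name'], row['stars'])
# Write the paired sources out for side-by-side inspection:
with open(f"{row['module_name']}_pytorch.py", 'w') as f:
    f.write(row['pytorch_code'])
with open(f"{row['module_name']}_triton.py", 'w') as f:
    f.write(row['triton_code'])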
entry_point: ResBlock

original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   x => convolution
#   x_1 => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/l3/cl3kktfjbfxvoqsgvjon5fk5ycnqjp7n2a3dk3gk4gw4n2jfe25m.py
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_2 => convolution_1
#   x_3 => add
#   x_4 => relu_1
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x3), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = 0.0
    tmp8 = tmp6 <= tmp7
    tl.store(in_out_ptr0 + (x3), tmp6, xmask)
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
        del primals_3
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2; del buf2  # reuse
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
        triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf3, primals_5, primals_1, buf4, 256, grid=grid(256), stream=stream0)
        del primals_5
    return (buf3, primals_1, primals_2, primals_4, buf1, buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
from torch import nn
import torch.utils.data
import torch.autograd


class ResBlock(nn.Module):

    def __init__(self, num_features, use_batch_norm=False):
        super(ResBlock, self).__init__()
        self.num_features = num_features
        self.conv_layer1 = nn.Conv2d(num_features, num_features, kernel_size=3, stride=1, padding=1)
        self.relu_layer = nn.ReLU()
        self.conv_layer2 = nn.Conv2d(num_features, num_features, kernel_size=3, stride=1, padding=1)
        self.use_batch_norm = use_batch_norm
        if self.use_batch_norm:
            self.batch_norm_layer1 = nn.BatchNorm2d(self.num_features)
            self.batch_norm_layer2 = nn.BatchNorm2d(self.num_features)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)

    def forward(self, x):
        residual = x
        x = self.conv_layer1(x)
        if self.use_batch_norm:
            x = self.batch_norm_layer1(x)
        x = self.relu_layer(x)
        x = self.conv_layer2(x)
        if self.use_batch_norm:
            x = self.batch_norm_layer2(x)
        x += residual
        x = self.relu_layer(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
import torch.autograd

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.full([1], 0, tl.int32)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = 0.0
    tmp8 = tmp6 <= tmp7
    tl.store(in_out_ptr0 + x3, tmp6, xmask)
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](buf3, primals_5, primals_1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_2, primals_4, buf1, buf4


class ResBlockNew(nn.Module):

    def __init__(self, num_features, use_batch_norm=False):
        super(ResBlockNew, self).__init__()
        self.num_features = num_features
        self.conv_layer1 = nn.Conv2d(num_features, num_features, kernel_size=3, stride=1, padding=1)
        self.relu_layer = nn.ReLU()
        self.conv_layer2 = nn.Conv2d(num_features, num_features, kernel_size=3, stride=1, padding=1)
        self.use_batch_norm = use_batch_norm
        if self.use_batch_norm:
            self.batch_norm_layer1 = nn.BatchNorm2d(self.num_features)
            self.batch_norm_layer2 = nn.BatchNorm2d(self.num_features)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)

    def forward(self, input_0):
        primals_2 = self.conv_layer1.weight
        primals_3 = self.conv_layer1.bias
        primals_4 = self.conv_layer2.weight
        primals_5 = self.conv_layer2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: HubBucket-Team/annotated_deep_learning_paper_implementations
module_name: ResBlock
synthetic: false
uuid: 5312
licenses: ["MIT"]
stars: 1
sha: 4a9716b01e336c57739dfdbdd90648276b53c433
repo_link: https://github.com/HubBucket-Team/annotated_deep_learning_paper_implementations/tree/4a9716b01e336c57739dfdbdd90648276b53c433
pytorch_code:

import torch
from torch import nn
import torch.utils.data
import torch.autograd


class Model(nn.Module):

    def __init__(self, num_features, use_batch_norm=False):
        super().__init__()
        self.num_features = num_features
        self.conv_layer1 = nn.Conv2d(num_features, num_features, kernel_size=3, stride=1, padding=1)
        self.relu_layer = nn.ReLU()
        self.conv_layer2 = nn.Conv2d(num_features, num_features, kernel_size=3, stride=1, padding=1)
        self.use_batch_norm = use_batch_norm
        if self.use_batch_norm:
            self.batch_norm_layer1 = nn.BatchNorm2d(self.num_features)
            self.batch_norm_layer2 = nn.BatchNorm2d(self.num_features)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)

    def forward(self, x):
        residual = x
        x = self.conv_layer1(x)
        if self.use_batch_norm:
            x = self.batch_norm_layer1(x)
        x = self.relu_layer(x)
        x = self.conv_layer2(x)
        if self.use_batch_norm:
            x = self.batch_norm_layer2(x)
        x += residual
        x = self.relu_layer(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
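The triton_code field replaces the eager forward with a call into the generated kernels while keeping the module's parameters. A minimal sketch of checking that the two paths agree, assuming a CUDA device and that this row's python_code and triton_code fields have been saved as the hypothetical files resblock_pytorch.py and resblock_triton.py:

import torch
from resblock_pytorch import ResBlock    # hypothetical file saved from python_code
from resblock_triton import ResBlockNew  # hypothetical file saved from triton_code

eager = ResBlock(num_features=4).cuda()
compiled = ResBlockNew(num_features=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share weights so outputs are comparable
x = torch.rand([4, 4, 4, 4], device='cuda')
with torch.no_grad():
    # Both paths compute relu(conv2(relu(conv1(x))) + x); tolerances allow
    # for float accumulation-order differences between eager and Triton.
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)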
entry_point: _Logit

original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/f3/cf3wjo3codglmel3mdjaodbq3s3viwdoc74iaz5e3kntwsnjtjqi.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
#   sigmoid => sigmoid
# Graph fragment:
#   %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
#   %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
        stream0 = get_raw_stream(0)
        triton_poi_fused_sigmoid_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
    return (buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch


class _Logit(torch.nn.Module):
    """Simple logistic regression model."""

    def __init__(self, din, dout=1):
        """Model parameter constructor.
        Args:
            din  Number of input dimensions
            dout Number of output dimensions
        """
        super().__init__()
        self._din = din
        self._dout = dout
        self._linear = torch.nn.Linear(din, dout)

    def forward(self, x):
        """Model's forward pass.
        Args:
            x Input tensor
        Returns:
            Output tensor
        """
        return torch.sigmoid(self._linear(x.view(-1, self._din)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'din': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4), (4, 1))
    assert_size_stride(primals_3, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf1, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1


class _LogitNew(torch.nn.Module):
    """Simple logistic regression model."""

    def __init__(self, din, dout=1):
        """Model parameter constructor.
        Args:
            din  Number of input dimensions
            dout Number of output dimensions
        """
        super().__init__()
        self._din = din
        self._dout = dout
        self._linear = torch.nn.Linear(din, dout)

    def forward(self, input_0):
        primals_2 = self._linear.weight
        primals_3 = self._linear.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
repo_name: IKACE/DifferentialByzantine-1
module_name: _Logit
synthetic: false
uuid: 5313
licenses: ["MIT"]
stars: 1
sha: 809fd6e070fedeb87a6dbff6f883e93e3c5c8e09
repo_link: https://github.com/IKACE/DifferentialByzantine-1/tree/809fd6e070fedeb87a6dbff6f883e93e3c5c8e09
pytorch_code:

import torch


class Model(torch.nn.Module):
    """Simple logistic regression model."""

    def __init__(self, din, dout=1):
        """Model parameter constructor.
        Args:
            din  Number of input dimensions
            dout Number of output dimensions
        """
        super().__init__()
        self._din = din
        self._dout = dout
        self._linear = torch.nn.Linear(din, dout)

    def forward(self, x):
        """Model's forward pass.
        Args:
            x Input tensor
        Returns:
            Output tensor
        """
        return torch.sigmoid(self._linear(x.view(-1, self._din)))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
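Dumps like the original_triton_code field are what TorchInductor emits when it compiles a module. A minimal sketch, assuming PyTorch 2.x with a CUDA device and the hypothetical file logit_pytorch.py saved from this row's python_code, of reproducing such output:

import torch
from logit_pytorch import _Logit  # hypothetical file saved from python_code

model = _Logit(din=4).cuda()
compiled = torch.compile(model, backend='inductor')
x = torch.rand([4, 4, 4, 4], device='cuda')
out = compiled(x)  # first call triggers Dynamo tracing and Inductor codegen
# If the process is run with the environment variable TORCH_COMPILE_DEBUG=1,
# Inductor writes its generated wrapper (an output_code.py resembling the
# original_triton_code field above) under ./torch_compile_debug/.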
entry_point: PytorchMultiClass

original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/y3/cy3bfx7suhzmqovrmgwr7kipqmduyq6otbqkuvdqlaprgg7g2dgl.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   relu => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 5120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 80
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/qw/cqwdvuc66lglzux6l2qprkpkyfvgktk37ixovgb2zaonbhwfr275.py
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   relu_1 => relu_1
# Graph fragment:
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 100
    x2 = xindex % 1600
    x3 = (xindex // 1600)
    tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x4), tmp4, xmask)
    tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/v5/cv5xjxyiwj5srnazlm7rtrf3tpnol2tiyeemqiewxgcavr2kspjd.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 6656
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 416
    x2 = (xindex // 1664)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (416 + x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (832 + x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1248 + x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/uj/cuj4ikhlf62z5znkjrccjodr6mhfmyr6almnznd5zp72mllajsj4.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 6656
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 416
    x2 = (xindex // 1664)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (416 + x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (832 + x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1248 + x0 + (1664*x2)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (80, 4), (4, 1))
    assert_size_stride(primals_2, (80, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (100, 80), (80, 1))
    assert_size_stride(primals_5, (100, ), (1, ))
    assert_size_stride(primals_6, (104, 100), (100, 1))
    assert_size_stride(primals_7, (104, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 80), (80, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 80), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 80), (1280, 320, 80, 1), 0); del buf0  # reuse
        buf8 = empty_strided_cuda((4, 4, 4, 80), (1280, 320, 80, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 5120, grid=grid(5120), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(primals_4, (80, 100), (1, 80), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf2  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
        # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf7, 6400, grid=grid(6400), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((64, 104), (104, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 104), (1, 100), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 104), (1664, 416, 104, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf4, buf5, 6656, grid=grid(6656), stream=stream0)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 104), (1664, 416, 104, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_3.run(buf5, buf6, 6656, grid=grid(6656), stream=stream0)
        del buf5
    return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(buf3, (64, 100), (100, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((80, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((100, 80), (80, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((104, 100), (100, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((104, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class PytorchMultiClass(nn.Module):

    def __init__(self, num_features):
        super(PytorchMultiClass, self).__init__()
        self.layer_1 = nn.Linear(num_features, 80)
        self.layer_2 = nn.Linear(80, 100)
        self.layer_out = nn.Linear(100, 104)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = F.dropout(F.relu(self.layer_1(x)), training=self.training)
        x = F.dropout(F.relu(self.layer_2(x)), training=self.training)
        x = self.layer_out(x)
        return self.softmax(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 5120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 80
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 100
    x2 = xindex % 1600
    x3 = xindex // 1600
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6656
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 416
    x2 = xindex // 1664
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (416 + x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (832 + x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1248 + x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6656
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 416
    x2 = xindex // 1664
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (416 + x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (832 + x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1248 + x0 + 1664 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (80, 4), (4, 1))
    assert_size_stride(primals_2, (80,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (100, 80), (80, 1))
    assert_size_stride(primals_5, (100,), (1,))
    assert_size_stride(primals_6, (104, 100), (100, 1))
    assert_size_stride(primals_7, (104,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 80), (80, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 80), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 80), (1280, 320, 80, 1), 0)
        del buf0
        buf8 = empty_strided_cuda((4, 4, 4, 80), (1280, 320, 80, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(5120)](buf1, primals_2, buf8, 5120, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(primals_4, (80, 100), (1, 80), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
        del buf2
        buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(6400)](buf3, primals_5, buf7, 6400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 104), (104, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 104), (1, 100), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf5 = empty_strided_cuda((4, 4, 4, 104), (1664, 416, 104, 1), torch.float32)
        triton_poi_fused__softmax_2[grid(6656)](buf4, buf5, 6656, XBLOCK=256, num_warps=4, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4, 4, 104), (1664, 416, 104, 1), 0)
        del buf4
        triton_poi_fused__softmax_3[grid(6656)](buf5, buf6, 6656, XBLOCK=128, num_warps=4, num_stages=1)
        del buf5
    return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 80), (80, 1), 0), reinterpret_tensor(buf3, (64, 100), (100, 1), 0), buf6, primals_6, buf7, primals_4, buf8


class PytorchMultiClassNew(nn.Module):

    def __init__(self, num_features):
        super(PytorchMultiClassNew, self).__init__()
        self.layer_1 = nn.Linear(num_features, 80)
        self.layer_2 = nn.Linear(80, 100)
        self.layer_out = nn.Linear(100, 104)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input_0):
        primals_1 = self.layer_1.weight
        primals_2 = self.layer_1.bias
        primals_4 = self.layer_2.weight
        primals_5 = self.layer_2.bias
        primals_6 = self.layer_out.weight
        primals_7 = self.layer_out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
repo_name: HninPwint/beer_type_prediction
module_name: PytorchMultiClass
synthetic: false
uuid: 5314
licenses: ["MIT"]
stars: 1
sha: 6845920821bedc059dbe92af5c4a7689cb616023
repo_link: https://github.com/HninPwint/beer_type_prediction/tree/6845920821bedc059dbe92af5c4a7689cb616023
pytorch_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):

    def __init__(self, num_features):
        super().__init__()
        self.layer_1 = nn.Linear(num_features, 80)
        self.layer_2 = nn.Linear(80, 100)
        self.layer_out = nn.Linear(100, 104)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = F.dropout(F.relu(self.layer_1(x)), training=self.training)
        x = F.dropout(F.relu(self.layer_2(x)), training=self.training)
        x = self.layer_out(x)
        return self.softmax(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
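A detail worth noting in this row: the model applies nn.Softmax(dim=1) to a 4-D output of shape (4, 4, 4, 104), so the generated _softmax kernels reduce over four values spaced 416 floats apart (dim 1 of a tensor with strides (1664, 416, 104, 1)), not over the 104 class logits in the last dimension. A small CPU-only sketch of that behavior:

import torch

y = torch.rand(4, 4, 4, 104)
p = torch.softmax(y, dim=1)
print(p.sum(dim=1)[0, 0, :4])   # ~1.0 everywhere: normalization runs along dim 1
print(p.sum(dim=-1)[0, 0])      # generally != 1.0: the class dimension is untouched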
entry_point: VGG_block

original_triton_code (record truncated in the source dump; it ends mid-decorator below):

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   output_1 => convolution
#   output_2 => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4096) % 64
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [output_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   output_5 => getitem, getitem_1
# Graph fragment:
#   %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
#   %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = (xindex // 32)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x2), tmp6, None)
    tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/xq/cxqz2dr7nh2qabrtemj52pazmhrknj5ltcy32ka252ia6a3jgpqi.py
# Topologically Sorted Source Nodes: [output_6, output_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   output_6 => convolution_2
#   output_7 => relu_2
# Graph fragment:
#   %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 128
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/pr/cpri5daxkfbmt5ostbhb5o2avircr64a2rmdkxfackaxyjfc7owe.py
# Topologically Sorted Source Nodes: [output_10], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   output_10 => getitem_2, getitem_3
# Graph fragment:
#   %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
#   %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x2), tmp6, None)
    tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/of/cof37d5wbqzvtkioj7k4me7wqpvfv55rs62ytonj7gij2o3abnod.py
# Topologically Sorted Source Nodes: [output_11, output_12], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   output_11 => convolution_4
#   output_12 => relu_4
# Graph fragment:
#   %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 256
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/mn/cmnzsv2cdbsuq2sygridqvwumzmcvknuthlumel5m25l2ajsr4ft.py
# Topologically Sorted Source Nodes: [output_19], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   output_19 => getitem_4, getitem_5
# Graph fragment:
#   %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
#   %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock':
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ic/cicsjqc3cfcjzqlztx4hz7ssqwe47ngo3g2onc6463k3vgfmt5cw.py # Topologically Sorted Source Nodes: [output_20, output_21], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # output_20 => convolution_8 # output_21 => relu_8 # Graph fragment: # %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/j4/cj4ap37hjun27ng5v4gtm7gil2vlzjiec46qmelx43ro5k22fwkx.py # Topologically Sorted Source Nodes: [output_22, output_23], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # output_22 => convolution_9 # output_23 => gt, mul, where # Graph fragment: # %convolution_9 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_9, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution_9), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_9, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_7 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), 
tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3y/c3yrqinctlrh2nvmbo6ogsclcwj3pe6m2n2mxgbjaxaw4f52fryg.py # Topologically Sorted Source Nodes: [output_24, output_25], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # output_24 => convolution_10 # output_25 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_10 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_23, %primals_24, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_10, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %convolution_10), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_10, %mul_1), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_8 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_8(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kj/ckj5uk2mmrb3yeegzlgzqpriydhqxlogodwwqw7b4wbhd3giux5u.py # Topologically Sorted Source Nodes: [output_26, output_27], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # output_26 => convolution_11 # output_27 => gt_2, mul_2, where_2 # Graph fragment: # %convolution_11 : 
[num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_11, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %convolution_11), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_11, %mul_2), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_9 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) 
assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256, ), (1, )) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, ), (1, )) assert_size_stride(primals_23, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_24, (256, ), (1, )) assert_size_stride(primals_25, (256, ), (1, )) assert_size_stride(primals_26, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (128, ), (1, )) assert_size_stride(primals_28, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [output_3, output_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [output_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [output_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [output_6, output_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [output_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), 
groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [output_8, output_9], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 524288, grid=grid(524288), stream=stream0) del primals_9 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [output_10], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [output_11], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [output_11, output_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 262144, grid=grid(262144), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [output_13], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [output_13, output_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 262144, grid=grid(262144), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [output_15], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [output_15, output_16], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 262144, grid=grid(262144), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [output_17], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [output_17, output_18], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf19, primals_17, 262144, grid=grid(262144), stream=stream0) del primals_17 buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.int8) # Topologically Sorted Source Nodes: [output_19], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [output_20], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 
0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [output_20, output_21], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [output_22], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1)) buf25 = buf24; del buf24 # reuse buf26 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [output_22, output_23], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_7.run(buf25, primals_21, primals_22, buf26, 131072, grid=grid(131072), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [output_24], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = buf27; del buf27 # reuse buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [output_24, output_25], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_8.run(buf28, primals_24, primals_25, buf29, 65536, grid=grid(65536), stream=stream0) del primals_24 # Topologically Sorted Source Nodes: [output_26], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1)) buf31 = buf30; del buf30 # reuse buf32 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [output_26, output_27], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_9.run(buf31, primals_27, primals_28, buf32, 32768, grid=grid(32768), stream=stream0) del primals_27 return (buf32, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_23, primals_25, primals_26, primals_28, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf26, buf28, buf29, buf31, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), 
device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
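# --- Illustrative sketch (not part of the generated module) ---
# Each fused conv+ReLU kernel above implements the same pattern: the
# convolution itself runs as an extern kernel with bias=None, and the Triton
# kernel then adds the per-channel bias and applies ReLU in place. A minimal
# eager-mode reference for that epilogue, assuming an (N, C, H, W) layout that
# matches the `x1 = xindex // (H*W) % C` channel indexing in the kernels:
import torch
import torch.nn.functional as F

def bias_relu_epilogue(conv_out: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # conv_out: (N, C, H, W) output of a bias-free convolution
    # bias:     (C,) per-channel bias, broadcast over N, H and W
    return F.relu(conv_out + bias.view(1, -1, 1, 1))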
import torch
import torch.nn as nn
from torch.nn import init


class VGG_block(nn.Module):
    """
    1. all convolutions use a bias by default
    2. ReLU activations with 3 max-pooling stages
    3. the first 10 conv layers follow the original VGG
    4. 2 extra conv layers added by CMU
    5. default in_dim = 3, out_dim = 128
    6. all kernel_size = 3, stride = 1
    """

    def __init__(self, in_dim=3, out_dim=128):
        super(VGG_block, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 1)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool_1 = nn.MaxPool2d(2, 2, 0)
        self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool_2 = nn.MaxPool2d(2, 2, 0)
        self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_4 = nn.ReLU(inplace=True)
        self.pool_3 = nn.MaxPool2d(2, 2, 0)
        self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1)
        self.relu4_2 = nn.PReLU(num_parameters=512)
        self.conv4_3_cmu = nn.Conv2d(512, 256, 3, 1, 1)
        self.relu4_3 = nn.PReLU(num_parameters=256)
        self.conv4_4_cmu = nn.Conv2d(256, 128, 3, 1, 1)
        self.relu4_4 = nn.PReLU(num_parameters=128)
        self.initialization()

    def forward(self, input_1):
        """Reuse one variable for the intermediate results."""
        output_1 = self.conv1_1(input_1)
        output_1 = self.relu1_1(output_1)
        output_1 = self.conv1_2(output_1)
        output_1 = self.relu1_2(output_1)
        output_1 = self.pool_1(output_1)
        output_1 = self.conv2_1(output_1)
        output_1 = self.relu2_1(output_1)
        output_1 = self.conv2_2(output_1)
        output_1 = self.relu2_2(output_1)
        output_1 = self.pool_2(output_1)
        output_1 = self.conv3_1(output_1)
        output_1 = self.relu3_1(output_1)
        output_1 = self.conv3_2(output_1)
        output_1 = self.relu3_2(output_1)
        output_1 = self.conv3_3(output_1)
        output_1 = self.relu3_3(output_1)
        output_1 = self.conv3_4(output_1)
        output_1 = self.relu3_4(output_1)
        output_1 = self.pool_3(output_1)
        output_1 = self.conv4_1(output_1)
        output_1 = self.relu4_1(output_1)
        output_1 = self.conv4_2(output_1)
        output_1 = self.relu4_2(output_1)
        output_1 = self.conv4_3_cmu(output_1)
        output_1 = self.relu4_3(output_1)
        output_1 = self.conv4_4_cmu(output_1)
        output_1 = self.relu4_4(output_1)
        return output_1

    def initialization(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_normal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0.0)
            else:
                # modules without a weight parameter (ReLU, pooling, the
                # container itself) raise AttributeError and are skipped
                try:
                    init.constant_(m.weight, 0.0)
                except AttributeError:
                    pass


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
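# Shape-check sketch (hypothetical usage, not from the repo): the three 2x2
# max-pool stages halve the spatial size 64 -> 32 -> 16 -> 8 while the two CMU
# layers reduce the channels to 128, so the block maps (4, 3, 64, 64) to
# (4, 128, 8, 8) -- the same shape as buf32 in the generated code above.
def check_output_shape():
    block = VGG_block()
    x = torch.rand(4, 3, 64, 64)
    y = block(x)
    assert y.shape == (4, 128, 8, 8)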
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = 
tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_8(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) @triton.jit def 
triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28 ) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512,), (1,)) assert_size_stride(primals_23, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_24, (256,), (1,)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4, 
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9, buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19, buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1)) buf25 = buf24 del buf24 buf26 = 
empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused__prelu_kernel_convolution_7[grid(131072)](buf25, primals_21, primals_22, buf26, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf27 = extern_kernels.convolution(buf26, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused__prelu_kernel_convolution_8[grid(65536)](buf28, primals_24, primals_25, buf29, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_24 buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1)) buf31 = buf30 del buf30 buf32 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. float32) triton_poi_fused__prelu_kernel_convolution_9[grid(32768)](buf31, primals_27, primals_28, buf32, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 return (buf32, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_23, primals_25, primals_26, primals_28, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf26, buf28, buf29, buf31) class VGG_blockNew(nn.Module): """ 1. default have the bias 2. using ReLU and 3 * max pooling 3. 10 layers of VGG original 4. 2 extra layers by CMU 5. default in_dim = 3,out_dim = 128 6. all kernal_size = 3, stride = 1 """ def __init__(self, in_dim=3, out_dim=128): super(VGG_blockNew, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 1) self.relu1_1 = nn.ReLU(inplace=True) self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 1) self.relu1_2 = nn.ReLU(inplace=True) self.pool_1 = nn.MaxPool2d(2, 2, 0) self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 1) self.relu2_1 = nn.ReLU(inplace=True) self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 1) self.relu2_2 = nn.ReLU(inplace=True) self.pool_2 = nn.MaxPool2d(2, 2, 0) self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 1) self.relu3_1 = nn.ReLU(inplace=True) self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 1) self.relu3_2 = nn.ReLU(inplace=True) self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 1) self.relu3_3 = nn.ReLU(inplace=True) self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 1) self.relu3_4 = nn.ReLU(inplace=True) self.pool_3 = nn.MaxPool2d(2, 2, 0) self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 1) self.relu4_1 = nn.ReLU(inplace=True) self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1) self.relu4_2 = nn.PReLU(num_parameters=512) self.conv4_3_cmu = nn.Conv2d(512, 256, 3, 1, 1) self.relu4_3 = nn.PReLU(num_parameters=256) self.conv4_4_cmu = nn.Conv2d(256, 128, 3, 1, 1) self.relu4_4 = nn.PReLU(num_parameters=128) self.initilization() def initilization(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.xavier_normal_(m.weight) if m.bias is not None: init.constant_(m.bias, 0.0) else: try: init.constant_(m.weight, 0.0) except: pass def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.conv1_2.weight primals_5 = self.conv1_2.bias primals_6 = self.conv2_1.weight primals_7 = self.conv2_1.bias primals_8 = self.conv2_2.weight primals_9 = self.conv2_2.bias primals_10 = self.conv3_1.weight primals_11 = self.conv3_1.bias primals_12 = self.conv3_2.weight 
primals_13 = self.conv3_2.bias primals_14 = self.conv3_3.weight primals_15 = self.conv3_3.bias primals_16 = self.conv3_4.weight primals_17 = self.conv3_4.bias primals_18 = self.conv4_1.weight primals_19 = self.conv4_1.bias primals_20 = self.conv4_2.weight primals_21 = self.conv4_2.bias primals_22 = self.relu4_2.weight primals_23 = self.conv4_3_cmu.weight primals_24 = self.conv4_3_cmu.bias primals_25 = self.relu4_3.weight primals_26 = self.conv4_4_cmu.weight primals_27 = self.conv4_4_cmu.bias primals_28 = self.relu4_4.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28]) return output[0]
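# Equivalence sketch (an assumption-laden check, not part of the dump): with
# identical weights, the compiled wrapper VGG_blockNew should reproduce the
# eager VGG_block defined earlier in this entry. This assumes a CUDA device
# and both classes imported into one namespace.
def check_compiled_matches_eager():
    eager = VGG_block().cuda().eval()
    compiled = VGG_blockNew().cuda().eval()
    compiled.load_state_dict(eager.state_dict())  # share one set of weights
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)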
H-Liu1997/Pytorch_Pose_Estimation_Framework
VGG_block
false
5315
[ "MIT" ]
1
06616b3459ff639f8486e6ea4f93922597788b2a
https://github.com/H-Liu1997/Pytorch_Pose_Estimation_Framework/tree/06616b3459ff639f8486e6ea4f93922597788b2a
import torch
import torch.nn as nn
from torch.nn import init


class Model(nn.Module):
    """
    1. all convolutions use a bias by default
    2. ReLU activations with 3 max-pooling stages
    3. the first 10 conv layers follow the original VGG
    4. 2 extra conv layers added by CMU
    5. default in_dim = 3, out_dim = 128
    6. all kernel_size = 3, stride = 1
    """

    def __init__(self, in_dim=3, out_dim=128):
        super().__init__()
        self.conv1_1 = nn.Conv2d(3, 64, 3, 1, 1)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool_1 = nn.MaxPool2d(2, 2, 0)
        self.conv2_1 = nn.Conv2d(64, 128, 3, 1, 1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool_2 = nn.MaxPool2d(2, 2, 0)
        self.conv3_1 = nn.Conv2d(128, 256, 3, 1, 1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_4 = nn.ReLU(inplace=True)
        self.pool_3 = nn.MaxPool2d(2, 2, 0)
        self.conv4_1 = nn.Conv2d(256, 512, 3, 1, 1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1)
        self.relu4_2 = nn.PReLU(num_parameters=512)
        self.conv4_3_cmu = nn.Conv2d(512, 256, 3, 1, 1)
        self.relu4_3 = nn.PReLU(num_parameters=256)
        self.conv4_4_cmu = nn.Conv2d(256, 128, 3, 1, 1)
        self.relu4_4 = nn.PReLU(num_parameters=128)
        self.initialization()

    def forward(self, input_1):
        """Reuse one variable for the intermediate results."""
        output_1 = self.conv1_1(input_1)
        output_1 = self.relu1_1(output_1)
        output_1 = self.conv1_2(output_1)
        output_1 = self.relu1_2(output_1)
        output_1 = self.pool_1(output_1)
        output_1 = self.conv2_1(output_1)
        output_1 = self.relu2_1(output_1)
        output_1 = self.conv2_2(output_1)
        output_1 = self.relu2_2(output_1)
        output_1 = self.pool_2(output_1)
        output_1 = self.conv3_1(output_1)
        output_1 = self.relu3_1(output_1)
        output_1 = self.conv3_2(output_1)
        output_1 = self.relu3_2(output_1)
        output_1 = self.conv3_3(output_1)
        output_1 = self.relu3_3(output_1)
        output_1 = self.conv3_4(output_1)
        output_1 = self.relu3_4(output_1)
        output_1 = self.pool_3(output_1)
        output_1 = self.conv4_1(output_1)
        output_1 = self.relu4_1(output_1)
        output_1 = self.conv4_2(output_1)
        output_1 = self.relu4_2(output_1)
        output_1 = self.conv4_3_cmu(output_1)
        output_1 = self.relu4_3(output_1)
        output_1 = self.conv4_4_cmu(output_1)
        output_1 = self.relu4_4(output_1)
        return output_1

    def initialization(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_normal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0.0)
            else:
                # modules without a weight parameter (ReLU, pooling, the
                # container itself) raise AttributeError and are skipped
                try:
                    init.constant_(m.weight, 0.0)
                except AttributeError:
                    pass


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return []
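# The tail of the block swaps ReLU for channel-wise PReLU. A minimal reference
# (illustrative only) of what nn.PReLU(num_parameters=C) computes, matching
# the fused `tl.where(x > 0, x, a * x)` pattern in the generated kernels above:
def prelu_reference(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # x: (N, C, H, W); weight: (C,) learnable negative slope per channel
    a = weight.view(1, -1, 1, 1)
    return torch.where(x > 0, x, a * x)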
SSE
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/76/c76fhhyrznrnuxjw2jcfyhzefm7zizdizmg5qnls6bytphhnlc4k.py # Topologically Sorted Source Nodes: [sub, res, lt], Original ATen: [aten.sub, aten.pow, aten.lt] # Source node to ATen node mapping: # lt => lt # res => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) triton_poi_fused_lt_pow_sub_0 = async_compile.triton('triton_poi_fused_lt_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lt_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_lt_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tmp0 < tmp1 tl.store(out_ptr0 + (x0), tmp3, xmask) tl.store(out_ptr1 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [sub, res, lt], Original ATen: [aten.sub, aten.pow, aten.lt] stream0 = get_raw_stream(0) triton_poi_fused_lt_pow_sub_0.run(arg0_1, arg1_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch.nn.modules.loss import _Loss


class SSE(_Loss):
    """
    Definition: sum_squared_error = 1/2 * nn.MSELoss(reduction = 'sum')
    The backward is defined as: input - target
    """

    def __init__(self, under_penalty, over_penalty):
        # Do not forward the penalties into _Loss, whose legacy positional
        # arguments are (size_average, reduce).
        super(SSE, self).__init__()
        self.under_penalty = under_penalty
        self.over_penalty = over_penalty

    def forward(self, input, target):
        res = (input - target) ** 2
        res[input < target] = res[input < target].mul(self.under_penalty)
        res[input > target] = res[input > target].mul(self.over_penalty)
        return res.sum() / 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'under_penalty': 4, 'over_penalty': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn.modules.loss import _Loss

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_lt_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tmp0 < tmp1
    tl.store(out_ptr0 + x0, tmp3, xmask)
    tl.store(out_ptr1 + x0, tmp4, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_lt_pow_sub_0[grid(256)](arg0_1, arg1_1, buf0,
            buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0, buf1


class SSENew(_Loss):
    """
    Definition: sum_squared_error = 1/2 * nn.MSELoss(reduction = 'sum')
    The backward is defined as: input - target
    """

    def __init__(self, under_penalty, over_penalty):
        # Same fix as the eager class: _Loss's legacy positional arguments
        # are (size_average, reduce), not the penalties.
        super(SSENew, self).__init__()
        self.under_penalty = under_penalty
        self.over_penalty = over_penalty

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
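A hedged verification sketch (assumes a CUDA device and the `call` defined just above). Note that the compiled graph only captures the sub/pow/lt ops: `output[0]` is the elementwise squared residual and `output[1]` the under-shoot mask, so `SSENew.forward` does not reproduce the penalty scaling and `sum() / 2` reduction of the eager `SSE.forward`.

import torch

a = torch.rand([4, 4, 4, 4], device='cuda')
b = torch.rand([4, 4, 4, 4], device='cuda')
# call() empties the list it receives, so pass clones.
sq, lt = call([a.clone(), b.clone()])
torch.testing.assert_close(sq, (a - b) ** 2)  # fused (a - b) ** 2
assert torch.equal(lt, a < b)                 # fused a < b mask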
IVRL/CCID
SSE
false
5316
[ "MIT" ]
1
0d57c33696da87279d24777a2efd1204f5088bc9
https://github.com/IVRL/CCID/tree/0d57c33696da87279d24777a2efd1204f5088bc9
import torch
from torch.nn.modules.loss import _Loss


class Model(_Loss):
    """
    Definition: sum_squared_error = 1/2 * nn.MSELoss(reduction = 'sum')
    The backward is defined as: input - target
    """

    def __init__(self, under_penalty, over_penalty):
        # _Loss's legacy positional arguments are (size_average, reduce);
        # the penalties must not be passed through.
        super().__init__()
        self.under_penalty = under_penalty
        self.over_penalty = over_penalty

    def forward(self, input, target):
        res = (input - target) ** 2
        res[input < target] = res[input < target].mul(self.under_penalty)
        res[input > target] = res[input > target].mul(self.over_penalty)
        return res.sum() / 2


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
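A tiny worked example of the asymmetric penalty (a sketch, assuming the eager `Model` above): residuals are squared first, then entries where `input < target` are scaled by `under_penalty` and entries where `input > target` by `over_penalty`, and the result is summed and halved.

import torch

loss = Model(under_penalty=4, over_penalty=2)
inp = torch.tensor([1.0, 3.0])
tgt = torch.tensor([2.0, 2.0])
# squared residuals [1, 1]; first entry undershoots (x4), second
# overshoots (x2): (4 * 1 + 2 * 1) / 2 = 3.0
print(loss(inp, tgt))  # tensor(3.)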
PixelNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oj/cojs3rolrbuzbumcv56hzrpmpk5vjaaabxquccaenqx3ti3blyt2.py # Topologically Sorted Source Nodes: [tmp, mean, add, tmp1, mul_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # mean => mean # mul_1 => mul_1 # tmp => mul # tmp1 => rsqrt # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %rsqrt), kwargs = {}) triton_poi_fused_add_mean_mul_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_rsqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tmp, mean, add, tmp1, mul_1], Original ATen: [aten.mul, aten.mean, aten.add, aten.rsqrt] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_rsqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PixelNorm(nn.Module):

    def __init__(self, epsilon=1e-08):
        """
        @notice: avoid in-place ops.
        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
        """
        super(PixelNorm, self).__init__()
        self.epsilon = epsilon

    def forward(self, x):
        tmp = torch.mul(x, x)
        tmp1 = torch.rsqrt(torch.mean(tmp, dim=1, keepdim=True) + self.epsilon)
        return x * tmp1


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = 4.0
    tmp13 = tmp11 / tmp12
    tmp14 = 1e-08
    tmp15 = tmp13 + tmp14
    tmp16 = libdevice.rsqrt(tmp15)
    tmp17 = tmp0 * tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_mul_rsqrt_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class PixelNormNew(nn.Module):

    def __init__(self, epsilon=1e-08):
        """
        @notice: avoid in-place ops.
        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
        """
        super(PixelNormNew, self).__init__()
        self.epsilon = epsilon

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
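A hedged equivalence check (assumes a CUDA device): the fused kernel unrolls the channel mean over the 4 channels, so its output should match the eager formula x * rsqrt(mean(x * x, dim=1, keepdim=True) + 1e-08).

import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
y_triton = PixelNormNew()(x.clone())  # call() clears its argument list
y_eager = x * torch.rsqrt(torch.mean(x * x, dim=1, keepdim=True) + 1e-08)
torch.testing.assert_close(y_triton, y_eager)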
Iceland-Leo/StyleGAN2_PyTorch
PixelNorm
false
5318
[ "MIT" ]
1
3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
https://github.com/Iceland-Leo/StyleGAN2_PyTorch/tree/3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, epsilon=1e-08):
        """
        @notice: avoid in-place ops.
        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
        """
        super().__init__()
        self.epsilon = epsilon

    def forward(self, x):
        tmp = torch.mul(x, x)
        tmp1 = torch.rsqrt(torch.mean(tmp, dim=1, keepdim=True) + self.epsilon)
        return x * tmp1


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/6k/c6k5ixw3gkgdotheeugymmtgeh75ct7oabgoerynuxknqlmt2uhf.py # Topologically Sorted Source Nodes: [conv2d, c1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # c1 => gt, mul, where # conv2d => convolution # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py # Topologically Sorted Source Nodes: [p1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # p1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, 
tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hb/chbofhfqk7tjxxcl5qvjct2atyl4m3tpq432hh6und4mb6ket6hk.py # Topologically Sorted Source Nodes: [conv2d_2, c2], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # c2 => gt_2, mul_2, where_2 # conv2d_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {}) triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/gq/cgqvpghyzwkbypvbvlyig6ohuplw6qvhn73hkwj6auj5e2m5mqio.py # Topologically Sorted Source Nodes: [p2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node 
mapping: # p2 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ft/cftbr6dgoj5dvlis5nxtv52omekjotxvascaqkaneeq2evectjv6.py # Topologically Sorted Source Nodes: [conv2d_4, c3], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # c3 => gt_4, mul_4, where_4 # conv2d_4 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_4 : 
[num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {}) triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sa/csaftzx2wizpjlh3kfhvehexsbodntymkhlieviiunpbabb3qdpw.py # Topologically Sorted Source Nodes: [p3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # p3 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hy/chy4ykeys6p4rw4dz4r5p453hencz5nwjmk2zltdwlfmiuxi67eb.py # Topologically Sorted Source Nodes: [conv2d_6, c4], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # c4 => gt_6, mul_6, where_6 # conv2d_6 => convolution_6 # Graph fragment: # %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.2), kwargs = {}) # %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {}) triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/v4/cv4bgr5ihpscky5gololr6sh7rfrpysfa3a6blhdfz537nybxcot.py # Topologically Sorted Source Nodes: [p4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # p4 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bz/cbzp5bgw6fa6nsnqwbn3524d4zh6wo4yln6binir2cxpmie7ne2d.py # Topologically Sorted Source Nodes: [conv2d_8, c5], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # c5 => gt_8, mul_8, where_8 # conv2d_8 => convolution_8 # Graph fragment: # %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_8 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.2), kwargs = {}) # %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {}) triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4q/c4q7b54jkvdrigbh2hhwfdixd3omegnlfydgs7vrdotzztryz2d4.py # Topologically Sorted Source Nodes: [up6_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up6_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_10, %where_7], 1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 512 x0 = xindex % 64 x2 = (xindex // 32768) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 512, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/ij/cija3olw6cr62rftfqhakzgcme3krfny32x734h6puwoynmn36e3.py # Topologically Sorted Source Nodes: [up7_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up7_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_13, %where_5], 1), kwargs = {}) triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 256) % 256 x0 = xindex % 256 x2 = (xindex // 65536) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 256, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/n7/cn72godhzk2nf2qoevmkpkh7utghrsdupxxpx6bnjekioqeej2cz.py # Topologically Sorted Source Nodes: [up8_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up8_1 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %where_3], 1), kwargs = {}) triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 1024) % 128 x0 = xindex % 1024 x2 = (xindex // 131072) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 128, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x3), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/el/cely3mvdjhv7xihjloi6dtillfxfr2cgcvve2nzawetqs4vi5xjq.py # Topologically Sorted Source Nodes: [up9_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # up9_1 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_19, %where_1], 1), kwargs = {}) triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 4096) % 64
    x0 = xindex % 4096
    x2 = (xindex // 262144)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 32, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 64, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr2 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp10, other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/wn/cwn3n6pc3p5yfpg2qoyin6heoasg375jez7odcopiodaao5bowrc.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.pixel_shuffle]
# Source node to ATen node mapping:
#   out => clone
# Graph fragment:
#   %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_pixel_shuffle_13 = async_compile.triton('triton_poi_fused_pixel_shuffle_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072, 2], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pixel_shuffle_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 98304
    xnumel = 2
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x5 = xindex
    y0 = yindex % 64
    y1 = (yindex // 64) % 2
    y2 = (yindex // 128) % 64
    y6 = (yindex // 8192)
    y3 = (yindex // 8192) % 3
    y7 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + (64*y2) + (4096*x5) + (8192*y1) + (16384*y6)), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x5 + (2*y1) + (4*y3)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x5 + (2*y7)), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args
    args.clear()
    assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (32, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (32, ), (1, ))
    assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (64, ), (1, ))
    assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_9, (64, ), (1, ))
    assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_11, (128, ), (1, ))
    assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_13, (128, ), (1, ))
    assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_15, (256, ), (1, ))
    assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (256, ), (1, ))
    assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_19, (512, ), (1, ))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512, ), (1, ))
    assert_size_stride(primals_22, (512, 256, 2, 2), (1024, 4, 2, 1))
    assert_size_stride(primals_23, (256, ), (1, ))
    assert_size_stride(primals_24, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_25, (256, ), (1, ))
    assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_27, (256, ), (1, ))
    assert_size_stride(primals_28, (256, 128, 2, 2), (512, 4, 2, 1))
    assert_size_stride(primals_29, (128, ), (1, ))
    assert_size_stride(primals_30, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_31, (128, ), (1, ))
    assert_size_stride(primals_32, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_33, (128, ), (1, ))
    assert_size_stride(primals_34, (128, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_35, (64, ), (1, ))
    assert_size_stride(primals_36, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_37, (64, ), (1, ))
    assert_size_stride(primals_38, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_39, (64, ), (1, ))
    assert_size_stride(primals_40, (64, 32, 2, 2), (128, 4, 2, 1))
    assert_size_stride(primals_41, (32, ), (1, ))
    assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_43, (32, ), (1, ))
    assert_size_stride(primals_44, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_45, (32, ), (1, ))
    assert_size_stride(primals_46, (12, 32, 1, 1), (32, 1, 1, 1))
    assert_size_stride(primals_47, (12, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d, c1], Original ATen: [aten.convolution, aten.leaky_relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [conv2d_1, c1_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
        del primals_5
        buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8)
        # Topologically Sorted Source Nodes: [p1], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 131072, grid=grid(131072), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf7 = buf6; del buf6  # reuse
        # Topologically Sorted Source Nodes: [conv2d_2, c2], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_7, 262144, grid=grid(262144), stream=stream0)
        del primals_7
        # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
        buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf9 = buf8; del buf8  # reuse
        # Topologically Sorted Source Nodes: [conv2d_3, c2_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_2.run(buf9, primals_9, 262144, grid=grid(262144), stream=stream0)
        del primals_9
        buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
        buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8)
        # Topologically Sorted Source Nodes: [p2], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 65536, grid=grid(65536), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
        buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf13 = buf12; del buf12  # reuse
        # Topologically Sorted Source Nodes: [conv2d_4, c3], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_4.run(buf13, primals_11, 131072, grid=grid(131072), stream=stream0)
        del primals_11
        # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
        buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf15 = buf14; del buf14  # reuse
        # Topologically Sorted Source Nodes: [conv2d_5, c3_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_4.run(buf15, primals_13, 131072, grid=grid(131072), stream=stream0)
        del primals_13
        buf16 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
        buf17 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.int8)
        # Topologically Sorted Source Nodes: [p3], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_5.run(buf15, buf16, buf17, 32768, grid=grid(32768), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
        buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf18, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf19 = buf18; del buf18  # reuse
        # Topologically Sorted Source Nodes: [conv2d_6, c4], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_6.run(buf19, primals_15, 65536, grid=grid(65536), stream=stream0)
        del primals_15
        # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
        buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf21 = buf20; del buf20  # reuse
        # Topologically Sorted Source Nodes: [conv2d_7, c4_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_6.run(buf21, primals_17, 65536, grid=grid(65536), stream=stream0)
        del primals_17
        buf22 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
        buf23 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.int8)
        # Topologically Sorted Source Nodes: [p4], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_7.run(buf21, buf22, buf23, 16384, grid=grid(16384), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
        buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf24, (4, 512, 4, 4), (8192, 16, 4, 1))
        buf25 = buf24; del buf24  # reuse
        # Topologically Sorted Source Nodes: [conv2d_8, c5], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_8.run(buf25, primals_19, 32768, grid=grid(32768), stream=stream0)
        del primals_19
        # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
        buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf26, (4, 512, 4, 4), (8192, 16, 4, 1))
        buf27 = buf26; del buf26  # reuse
        # Topologically Sorted Source Nodes: [conv2d_9, c5_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_8.run(buf27, primals_21, 32768, grid=grid(32768), stream=stream0)
        del primals_21
        # Topologically Sorted Source Nodes: [up6], Original ATen: [aten.convolution]
        buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf28, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf29 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [up6_1], Original ATen: [aten.cat]
        triton_poi_fused_cat_9.run(buf28, primals_23, buf21, buf29, 131072, grid=grid(131072), stream=stream0)
        del buf28
        del primals_23
        # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
        buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf30, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf31 = buf30; del buf30  # reuse
        # Topologically Sorted Source Nodes: [conv2d_10, c6], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_6.run(buf31, primals_25, 65536, grid=grid(65536), stream=stream0)
        del primals_25
        # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
        buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf32, (4, 256, 8, 8), (16384, 64, 8, 1))
        buf33 = buf32; del buf32  # reuse
        # Topologically Sorted Source Nodes: [conv2d_11, c6_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_6.run(buf33, primals_27, 65536, grid=grid(65536), stream=stream0)
        del primals_27
        # Topologically Sorted Source Nodes: [up7], Original ATen: [aten.convolution]
        buf34 = extern_kernels.convolution(buf33, primals_28, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf34, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf35 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [up7_1], Original ATen: [aten.cat]
        triton_poi_fused_cat_10.run(buf34, primals_29, buf15, buf35, 262144, grid=grid(262144), stream=stream0)
        del buf34
        del primals_29
        # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
        buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf36, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf37 = buf36; del buf36  # reuse
        # Topologically Sorted Source Nodes: [conv2d_12, c7], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_4.run(buf37, primals_31, 131072, grid=grid(131072), stream=stream0)
        del primals_31
        # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
        buf38 = extern_kernels.convolution(buf37, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1))
        buf39 = buf38; del buf38  # reuse
        # Topologically Sorted Source Nodes: [conv2d_13, c7_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_4.run(buf39, primals_33, 131072, grid=grid(131072), stream=stream0)
        del primals_33
        # Topologically Sorted Source Nodes: [up8], Original ATen: [aten.convolution]
        buf40 = extern_kernels.convolution(buf39, primals_34, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf40, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf41 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
        # Topologically Sorted Source Nodes: [up8_1], Original ATen: [aten.cat]
        triton_poi_fused_cat_11.run(buf40, primals_35, buf9, buf41, 524288, grid=grid(524288), stream=stream0)
        del buf40
        del primals_35
        # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
        buf42 = extern_kernels.convolution(buf41, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf42, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf43 = buf42; del buf42  # reuse
        # Topologically Sorted Source Nodes: [conv2d_14, c8], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_2.run(buf43, primals_37, 262144, grid=grid(262144), stream=stream0)
        del primals_37
        # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
        buf44 = extern_kernels.convolution(buf43, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf44, (4, 64, 32, 32), (65536, 1024, 32, 1))
        buf45 = buf44; del buf44  # reuse
        # Topologically Sorted Source Nodes: [conv2d_15, c8_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_2.run(buf45, primals_39, 262144, grid=grid(262144), stream=stream0)
        del primals_39
        # Topologically Sorted Source Nodes: [up9], Original ATen: [aten.convolution]
        buf46 = extern_kernels.convolution(buf45, primals_40, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf46, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf47 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [up9_1], Original ATen: [aten.cat]
        triton_poi_fused_cat_12.run(buf46, primals_41, buf3, buf47, 1048576, grid=grid(1048576), stream=stream0)
        del buf46
        del primals_41
        # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
        buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf48, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf49 = buf48; del buf48  # reuse
        # Topologically Sorted Source Nodes: [conv2d_16, c9], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_0.run(buf49, primals_43, 524288, grid=grid(524288), stream=stream0)
        del primals_43
        # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
        buf50 = extern_kernels.convolution(buf49, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf50, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf51 = buf50; del buf50  # reuse
        # Topologically Sorted Source Nodes: [conv2d_17, c9_1], Original ATen: [aten.convolution, aten.leaky_relu]
        triton_poi_fused_convolution_leaky_relu_0.run(buf51, primals_45, 524288, grid=grid(524288), stream=stream0)
        del primals_45
        # Topologically Sorted Source Nodes: [c10], Original ATen: [aten.convolution]
        buf52 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf52, (4, 12, 64, 64), (49152, 4096, 64, 1))
        buf53 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256, 128, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out], Original ATen: [aten.pixel_shuffle]
        triton_poi_fused_pixel_shuffle_13.run(buf52, primals_47, buf53, 98304, 2, grid=grid(98304, 2), stream=stream0)
        del buf52
        del primals_47
    return (reinterpret_tensor(buf53, (4, 3, 128, 128), (49152, 16384, 128, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_22 = rand_strided((512, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32)
    primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_24 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_26 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_28 = rand_strided((256, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32)
    primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_30 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_32 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_34 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32)
    primals_35 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_36 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_37 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_38 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_40 = rand_strided((64, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32)
    primals_41 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_42 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_43 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_44 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_46 = rand_strided((12, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_47 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
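For orientation, a small eager-mode sketch (an illustrative addition, not part of the generated module) of the shape bookkeeping that triton_poi_fused_pixel_shuffle_13 and the final reinterpret_tensor implement: pixel_shuffle with upscale_factor=2 maps (N, C*2*2, H, W) to (N, C, 2*H, 2*W), so the (4, 3, 64, 2, 64, 2) buffer viewed as (4, 3, 128, 128) is exactly the shuffled head output. Runs on CPU with plain PyTorch.

import torch
import torch.nn.functional as F

x = torch.randn(4, 12, 64, 64)
ref = F.pixel_shuffle(x, 2)
# The same split-channels/permute/reshape sequence the fused kernel materializes into buf53:
manual = x.view(4, 3, 2, 2, 64, 64).permute(0, 1, 4, 2, 5, 3).reshape(4, 3, 128, 128)
assert ref.shape == (4, 3, 128, 128)
assert torch.equal(ref, manual)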
import math
import torch
import torch.nn as nn


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
        self.conv1_a = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv1_b = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.pool1 = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2_a = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv2_b = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.pool2 = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv3_a = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv3_b = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.pool3 = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv4_a = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv4_b = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.pool4 = nn.MaxPool2d(kernel_size=(2, 2))
        self.conv5_a = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv5_b = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.up6 = nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=(2, 2), stride=(2, 2))
        self.conv6_a = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv6_b = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.up7 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=(2, 2), stride=(2, 2))
        self.conv7_a = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv7_b = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.up8 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=(2, 2), stride=(2, 2))
        self.conv8_a = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv8_b = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.up9 = nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=(2, 2), stride=(2, 2))
        self.conv9_a = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv9_b = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1))
        self.conv10 = nn.Conv2d(in_channels=32, out_channels=12, kernel_size=(1, 1), stride=(1, 1), dilation=(1, 1))
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        c1 = self.lrelu(self.conv1_a(x))
        c1 = self.lrelu(self.conv1_b(c1))
        p1 = self.pool1(c1)
        c2 = self.lrelu(self.conv2_a(p1))
        c2 = self.lrelu(self.conv2_b(c2))
        p2 = self.pool1(c2)
        c3 = self.lrelu(self.conv3_a(p2))
        c3 = self.lrelu(self.conv3_b(c3))
        p3 = self.pool1(c3)
        c4 = self.lrelu(self.conv4_a(p3))
        c4 = self.lrelu(self.conv4_b(c4))
        p4 = self.pool1(c4)
        c5 = self.lrelu(self.conv5_a(p4))
        c5 = self.lrelu(self.conv5_b(c5))
        up6 = self.up6(c5)
        up6 = torch.cat([up6, c4], 1)
        c6 = self.lrelu(self.conv6_a(up6))
        c6 = self.lrelu(self.conv6_b(c6))
        up7 = self.up7(c6)
        up7 = torch.cat([up7, c3], 1)
        c7 = self.lrelu(self.conv7_a(up7))
        c7 = self.lrelu(self.conv7_b(c7))
        up8 = self.up8(c7)
        up8 = torch.cat([up8, c2], 1)
        c8 = self.lrelu(self.conv8_a(up8))
        c8 = self.lrelu(self.conv8_b(c8))
        up9 = self.up9(c8)
        up9 = torch.cat([up9, c1], 1)
        c9 = self.lrelu(self.conv9_a(up9))
        c9 = self.lrelu(self.conv9_b(c9))
        c10 = self.conv10(c9)
        out = nn.functional.pixel_shuffle(c10, 2)
        return out


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
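A quick smoke test for the eager module above (an illustrative addition, not part of the original source): with the (4, 4, 64, 64) input from get_inputs(), the 12-channel head followed by pixel_shuffle(., 2) yields a (4, 3, 128, 128) output. Runs on CPU.

net = Net()
with torch.no_grad():
    out = net(get_inputs()[0])
print(out.shape)  # torch.Size([4, 3, 128, 128])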
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > 
tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def 
triton_poi_fused_convolution_leaky_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 512 x0 = xindex % 64 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_cat_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 256 % 256 x0 = xindex % 256 x2 = xindex // 65536 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 1024 % 128 x0 = xindex % 1024 x2 = xindex // 131072 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_cat_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 64 x0 = xindex % 4096 x2 = xindex // 262144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 
+ tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x3, tmp14, None) @triton.jit def triton_poi_fused_pixel_shuffle_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 2 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x5 = xindex y0 = yindex % 64 y1 = yindex // 64 % 2 y2 = yindex // 128 % 64 y6 = yindex // 8192 y3 = yindex // 8192 % 3 y7 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 64 * y2 + 4096 * x5 + 8192 * y1 + 16384 * y6), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 2 * y1 + 4 * y3), xmask, eviction_policy ='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x5 + 2 * y7), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47) = args args.clear() assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (256,), (1,)) assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (256,), (1,)) assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_33, (128,), (1,)) 
assert_size_stride(primals_34, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_35, (64,), (1,)) assert_size_stride(primals_36, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (64,), (1,)) assert_size_stride(primals_38, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_39, (64,), (1,)) assert_size_stride(primals_40, (64, 32, 2, 2), (128, 4, 2, 1)) assert_size_stride(primals_41, (32,), (1,)) assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_43, (32,), (1,)) assert_size_stride(primals_44, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_45, (32,), (1,)) assert_size_stride(primals_46, (12, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_47, (12,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3, primals_5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf3, buf4, buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7, primals_7, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf9, primals_9, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32) buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(65536)](buf9, buf10, buf11, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 128, 16, 16), (32768, 256, 16, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf13, primals_11, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1)) buf15 = buf14 del 
buf14 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf15, primals_13, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf16 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch. float32) buf17 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.int8 ) triton_poi_fused_max_pool2d_with_indices_5[grid(32768)](buf15, buf16, buf17, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 8, 8), (16384, 64, 8, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf19, primals_15, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 256, 8, 8), (16384, 64, 8, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21, primals_17, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf22 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) buf23 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.int8 ) triton_poi_fused_max_pool2d_with_indices_7[grid(16384)](buf21, buf22, buf23, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 4, 4), (8192, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf25, primals_19, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_19 buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 512, 4, 4), (8192, 16, 4, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf27, primals_21, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_21 buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 256, 8, 8), (16384, 64, 8, 1)) buf29 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) triton_poi_fused_cat_9[grid(131072)](buf28, primals_23, buf21, buf29, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf28 del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 8, 8), (16384, 64, 8, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf31, primals_25, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_25 buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 8, 8), (16384, 64, 8, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf33, primals_27, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 buf34 = extern_kernels.convolution(buf33, primals_28, stride=(2, 2), padding=(0, 0), dilation=(1, 1), 
transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 128, 16, 16), (32768, 256, 16, 1)) buf35 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) triton_poi_fused_cat_10[grid(262144)](buf34, primals_29, buf15, buf35, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del buf34 del primals_29 buf36 = extern_kernels.convolution(buf35, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 128, 16, 16), (32768, 256, 16, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf37, primals_31, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf38 = extern_kernels.convolution(buf37, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf39, primals_33, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_33 buf40 = extern_kernels.convolution(buf39, primals_34, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf41 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) triton_poi_fused_cat_11[grid(524288)](buf40, primals_35, buf9, buf41, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf40 del primals_35 buf42 = extern_kernels.convolution(buf41, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf43, primals_37, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_37 buf44 = extern_kernels.convolution(buf43, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf45, primals_39, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_39 buf46 = extern_kernels.convolution(buf45, primals_40, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf47 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_12[grid(1048576)](buf46, primals_41, buf3, buf47, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del buf46 del primals_41 buf48 = extern_kernels.convolution(buf47, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf49, primals_43, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_43 buf50 = extern_kernels.convolution(buf49, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf51 = buf50 del buf50 triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf51, primals_45, 524288, 
XBLOCK=1024, num_warps=4, num_stages=1) del primals_45 buf52 = extern_kernels.convolution(buf51, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 12, 64, 64), (49152, 4096, 64, 1)) buf53 = empty_strided_cuda((4, 3, 64, 2, 64, 2), (49152, 16384, 256, 128, 2, 1), torch.float32) triton_poi_fused_pixel_shuffle_13[grid(98304, 2)](buf52, primals_47, buf53, 98304, 2, XBLOCK=2, YBLOCK=1024, num_warps=8, num_stages=1) del buf52 del primals_47 return (reinterpret_tensor(buf53, (4, 3, 128, 128), (49152, 16384, 128, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) m.bias.data.zero_() elif isinstance(m, nn.ConvTranspose2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) self.conv1_a = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv1_b = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool1 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv2_a = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv2_b = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv3_a = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv3_b = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv4_a = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv4_b = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool4 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv5_a = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv5_b = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up6 = nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=(2, 2), stride=(2, 2)) self.conv6_a = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv6_b = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up7 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=(2, 2), stride=(2, 2)) self.conv7_a = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv7_b = 
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up8 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=(2, 2), stride=(2, 2)) self.conv8_a = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv8_b = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up9 = nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=(2, 2), stride=(2, 2)) self.conv9_a = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv9_b = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv10 = nn.Conv2d(in_channels=32, out_channels=12, kernel_size=(1, 1), stride=(1, 1), dilation=(1, 1)) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_1 = self.conv1_a.weight primals_2 = self.conv1_a.bias primals_4 = self.conv1_b.weight primals_5 = self.conv1_b.bias primals_6 = self.conv2_a.weight primals_7 = self.conv2_a.bias primals_8 = self.conv2_b.weight primals_9 = self.conv2_b.bias primals_10 = self.conv3_a.weight primals_11 = self.conv3_a.bias primals_12 = self.conv3_b.weight primals_13 = self.conv3_b.bias primals_14 = self.conv4_a.weight primals_15 = self.conv4_a.bias primals_16 = self.conv4_b.weight primals_17 = self.conv4_b.bias primals_18 = self.conv5_a.weight primals_19 = self.conv5_a.bias primals_20 = self.conv5_b.weight primals_21 = self.conv5_b.bias primals_22 = self.up6.weight primals_23 = self.up6.bias primals_24 = self.conv6_a.weight primals_25 = self.conv6_a.bias primals_26 = self.conv6_b.weight primals_27 = self.conv6_b.bias primals_28 = self.up7.weight primals_29 = self.up7.bias primals_30 = self.conv7_a.weight primals_31 = self.conv7_a.bias primals_32 = self.conv7_b.weight primals_33 = self.conv7_b.bias primals_34 = self.up8.weight primals_35 = self.up8.bias primals_36 = self.conv8_a.weight primals_37 = self.conv8_a.bias primals_38 = self.conv8_b.weight primals_39 = self.conv8_b.bias primals_40 = self.up9.weight primals_41 = self.up9.bias primals_42 = self.conv9_a.weight primals_43 = self.conv9_a.bias primals_44 = self.conv9_b.weight primals_45 = self.conv9_b.bias primals_46 = self.conv10.weight primals_47 = self.conv10.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47]) return output[0]
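The final fused kernel in this entry (triton_poi_fused_pixel_shuffle_13) materializes a (4, 3, 64, 2, 64, 2) intermediate that the return value then reinterprets as (4, 3, 128, 128): a depth-to-space rearrangement of the 12-channel conv10 output. A minimal eager-mode sketch of the same transform, assuming only stock PyTorch:

import torch
import torch.nn.functional as F

x = torch.rand(4, 12, 64, 64)  # conv10 output: 12 = 3 * 2 * 2 channels

# Depth-to-space by hand with upscale factor r = 2: view channels as
# (C, r, r), move each r next to its spatial dim, then flatten. This is
# the same (N, C, H, r, W, r) layout the fused kernel writes into buf53.
y_manual = (x.reshape(4, 3, 2, 2, 64, 64)
             .permute(0, 1, 4, 2, 5, 3)
             .reshape(4, 3, 128, 128))

assert torch.allclose(y_manual, F.pixel_shuffle(x, 2))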
Groenbech96/Learning-to-See-in-the-Dark
Net
false
5319
[ "MIT" ]
1
a068c8642a651e4af195cd71e253694d88dfe3c5
https://github.com/Groenbech96/Learning-to-See-in-the-Dark/tree/a068c8642a651e4af195cd71e253694d88dfe3c5
import math import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) m.bias.data.zero_() elif isinstance(m, nn.ConvTranspose2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) self.conv1_a = nn.Conv2d(in_channels=4, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv1_b = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool1 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv2_a = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv2_b = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv3_a = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv3_b = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv4_a = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv4_b = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.pool4 = nn.MaxPool2d(kernel_size=(2, 2)) self.conv5_a = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv5_b = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up6 = nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=(2, 2), stride=(2, 2)) self.conv6_a = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv6_b = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up7 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=(2, 2), stride=(2, 2)) self.conv7_a = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv7_b = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up8 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=(2, 2), stride=(2, 2)) self.conv8_a = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv8_b = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.up9 = nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=(2, 2), stride=(2, 2)) self.conv9_a = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv9_b = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1)) self.conv10 = nn.Conv2d(in_channels=32, out_channels=12, kernel_size=(1, 1), stride=( # ... truncated (>4000 chars) for memory efficiency
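One pitfall in the listings above: the weight-initialization loop sits at the top of __init__, before any Conv2d or ConvTranspose2d layer has been assigned, so self.modules() finds no children and the loop is a no-op; every layer keeps PyTorch's default initialization. A hedged sketch of the fix (hypothetical class name, only the ordering changes), assuming He-style initialization was the intent:

import math
import torch.nn as nn

class NetFixedInit(nn.Module):  # hypothetical name, for illustration only
    def __init__(self):
        super().__init__()
        self.conv1_a = nn.Conv2d(4, 32, kernel_size=3, padding=1)
        # ... remaining layers exactly as in Net ...
        # Run the init loop *after* the layers exist so modules() finds them.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
                m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))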
MultiHeadedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < 
xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bs/cbsluabtq7ll426nybkislhh3cajm6f7ggrxam362hohynwnvtk6.py # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq] # Source node to ATen node mapping: # mask => eq # Graph fragment: # %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {}) triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2m/c2mqa6egkdg5wothwstp4t4dtvjgjekw5mx5ldieie6krs4bxolb.py # Topologically Sorted Source Nodes: [scores, scores_1, softmax], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # scores => div # scores_1 => full_default, where # softmax => amax, exp, sub, sum_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = 
call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float("-inf") tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + (x3), tmp20, xmask) tl.store(out_ptr1 + (x3), tmp31, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/qh/cqhmiexfxtu22w5zckygdewcq6zc6ji4wfscqj2genrriihibfdb.py # Topologically Sorted Source Nodes: [scores, scores_1, softmax, attn], Original ATen: [aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # attn => full_default_1, where_1 # scores => div # scores_1 => full_default, where # softmax => amax, div_1, exp, sub # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {}) triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x4 = xindex % 16 x5 = xindex x6 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, 
eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (x5), xmask) tmp6 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + (x6), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float("-inf") tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp0, tmp11, tmp10) tl.store(out_ptr0 + (x5), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) 
assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq] triton_poi_fused_eq_1.run(primals_10, buf6, 64, grid=grid(64), stream=stream0) del primals_10 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [scores, scores_1, softmax], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores, scores_1, softmax, attn], Original ATen: [aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_div_masked_fill_3.run(buf6, buf5, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] 
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_12 return (reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
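The two fused softmax kernels above implement a numerically stable masked softmax in two passes: _2 writes each row's max and exp-sum into buf7/buf8, and _3 normalizes and re-zeros masked positions. A minimal eager-mode sketch of the same computation, assuming scores of shape (batch, head, time1, time2) and a boolean mask that is True where attention is disallowed:

import torch

def masked_softmax(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Pass 1 equivalent: apply the mask, then take the row max and exp-sum.
    scores = scores.masked_fill(mask, float('-inf'))
    row_max = scores.amax(dim=-1, keepdim=True)
    exp = torch.exp(scores - row_max)
    denom = exp.sum(dim=-1, keepdim=True)
    # Pass 2 equivalent: normalize, then zero masked positions again so that
    # fully masked rows yield zeros instead of NaNs.
    return (exp / denom).masked_fill(mask, 0.0)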
import math import torch from typing import Optional from typing import Tuple from torch import nn class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from typing import Optional from typing import Tuple from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x3, tmp20, xmask) tl.store(out_ptr1 + x3, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + x5, xmask) tmp6 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = float('-inf') tmp5 = tl.where(tmp0, tmp4, tmp3) tmp7 = tmp5 - tmp6 tmp8 = tl_math.exp(tmp7) tmp10 = tmp8 / tmp9 tmp11 = 0.0 tmp12 = tl.where(tmp0, tmp11, tmp10) tl.store(out_ptr0 + x5, tmp12, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) triton_poi_fused_eq_1[grid(64)](primals_10, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5, 
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf6, buf5, buf7, buf8, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf8 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_12 return reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf5, buf6, reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class MultiHeadedAttentionNew(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). 
Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.linear_q.weight primals_3 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_11 = self.linear_out.weight primals_12 = self.linear_out.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
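Note that the compiled call graph above contains no dropout kernel, so it only matches the eager module when dropout is inactive. A minimal parity check, assuming a CUDA device and the get_inputs helper from the eager listing above:

import torch

torch.manual_seed(0)
ref = MultiHeadedAttention(n_head=4, n_feat=4, dropout_rate=0.5).cuda().eval()
new = MultiHeadedAttentionNew(n_head=4, n_feat=4, dropout_rate=0.5).cuda().eval()
new.load_state_dict(ref.state_dict())  # the four Linear layers line up 1:1

q, k, v, mask = (t.cuda() for t in get_inputs())
with torch.no_grad():
    assert torch.allclose(ref(q, k, v, mask), new(q, k, v, mask), atol=1e-5)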
Honghe/wenet
MultiHeadedAttention
false
5320
[ "Apache-2.0" ]
1
4421790bec3778df591816d69f0449930a9be321
https://github.com/Honghe/wenet/tree/4421790bec3778df591816d69f0449930a9be321
import math import torch from typing import Optional from typing import Tuple from torch import nn class Model(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct an MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, # ... truncated (>4000 chars) for memory efficiency
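For reference, the forward pass documented above is standard scaled dot-product attention; in the docstrings' notation,

$$\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V$$

With n_feat = 4 and n_head = 4 here, d_k = n_feat // n_head = 1, so the 1/sqrt(d_k) scale degenerates to 1.0, which is exactly the literal tmp2 = 1.0 multiplier baked into the fused kernels (and the div-by-1.0 node in the traced graph).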
MLPClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xc/cxcj5l7s6w5ttrq2fk2nirlbp44yesw6n2m2dnxtpcjjmym2njhr.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_4 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 128), (128, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 64), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf5, 4096, grid=grid(4096), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
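Each fused kernel above combines the bias add and ReLU forward with precomputation of the boolean <= 0 mask that aten.threshold_backward consumes during the backward pass. A minimal eager sketch of what one kernel stores, assuming x is the matmul output and b the layer bias:

import torch

def fused_relu_with_backward_mask(x: torch.Tensor, b: torch.Tensor):
    y = torch.relu(x + b)  # written in place over the matmul output buffer
    mask = y <= 0          # saved so the backward pass can zero gradients
    return y, mask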
import torch
import torch.nn as nn


class MLPClassifier(nn.Module):

    def __init__(self, input_dim, target_dim):
        super(MLPClassifier, self).__init__()
        self.input_dim = input_dim
        self.target_dim = target_dim
        self.fc1 = nn.Linear(self.input_dim, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, target_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.dropout(x)
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4, 'target_dim': 4}]
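A minimal parity sketch for this pair, assuming the eager class above and the compiled MLPClassifierNew wrapper below are both in scope and a CUDA device is available (the generated call() path is CUDA-only). Note the compiled graph contains no dropout at all, so the eager model must be in eval() mode for the outputs to agree:

import torch

eager = MLPClassifier(input_dim=4, target_dim=4).cuda().eval()
compiled = MLPClassifierNew(input_dim=4, target_dim=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # fc1/fc2/fc3 names line up

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    # dropout is the identity in eval mode, so both paths compute the same MLP
    torch.testing.assert_close(compiled(x), eager(x))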
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 128), (128, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 64), (64, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 64), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) triton_poi_fused_relu_threshold_backward_1[grid(4096)](buf3, primals_5, buf5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), 
reinterpret_tensor(buf3, (64, 64), (64, 1), 0 ), primals_6, buf5, primals_4, buf6 class MLPClassifierNew(nn.Module): def __init__(self, input_dim, target_dim): super(MLPClassifierNew, self).__init__() self.input_dim = input_dim self.target_dim = target_dim self.fc1 = nn.Linear(self.input_dim, 128) self.fc2 = nn.Linear(128, 64) self.fc3 = nn.Linear(64, target_dim) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=0.2) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
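The reinterpret_tensor calls in the call() above encode the standard lowering of nn.Linear over a 4-D input: the (4, 4, 4, 4) activation is viewed as a (64, 4) matrix, multiplied against the transposed weight, then viewed back. A hand-rolled sketch of that identity, assuming only stock PyTorch:

import torch

x = torch.rand(4, 4, 4, 4)
w = torch.rand(128, 4)  # plays the role of fc1.weight
b = torch.rand(128)     # plays the role of fc1.bias

# the mm that extern_kernels.mm performs, plus the bias add the Triton kernel fuses
flat = x.reshape(64, 4) @ w.t() + b
ref = torch.nn.functional.linear(x, w, b)
torch.testing.assert_close(flat.view(4, 4, 4, 128), ref)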
ITU-AI-ML-in-5G-Challenge/-ITU-ML5G-PS-032-KDDI-naist-lsm
MLPClassifier
false
5,321
[ "MIT" ]
1
f0c54cfde8fb9a5b78e116de7814a1afbd856799
https://github.com/ITU-AI-ML-in-5G-Challenge/-ITU-ML5G-PS-032-KDDI-naist-lsm/tree/f0c54cfde8fb9a5b78e116de7814a1afbd856799
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, input_dim, target_dim):
        super().__init__()
        self.input_dim = input_dim
        self.target_dim = target_dim
        self.fc1 = nn.Linear(self.input_dim, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, target_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.dropout(x)
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
Minibatch_stddev_layer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/kd/ckdiefxtppglvtsgjgaajhzhyzrxhhjcgisaniawn5rbjhy5zgvd.py # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, y_4, y_5, y_6], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.repeat] # Source node to ATen node mapping: # add => add # mean => mean # pow_1 => pow_1 # y_1 => sub # y_2 => mean_1 # y_3 => sqrt # y_4 => mean_2 # y_5 => mean_3 # y_6 => repeat # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [0], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sqrt, [2, 3, 4], True), kwargs = {}) # %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_2, [2]), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%mean_3, [4, 1, 4, 4]), kwargs = {}) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_repeat_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_repeat_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tmp29 = 1.0 tmp30 = tmp28 / tmp29 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yi/cyidf2yj3fms5jdxlfe7fdijzfj6p5a5q2qxo4llkuxnpqh6fj5o.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias # Topologically Sorted Source Nodes: [mean, y_1, pow_1, y_2, add, y_3, y_4, y_5, y_6], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.repeat] stream0 = get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Minibatch_stddev_layer(nn.Module):
    """
    Minibatch standard deviation layer. (D_stylegan2)
    """

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        n, c, h, w = x.shape
        group_size = min(n, self.group_size)
        y = x.view(group_size, -1, self.num_new_features,
                   c // self.num_new_features, h, w)
        y = y - torch.mean(y, dim=0, keepdim=True)
        y = torch.mean(y ** 2, dim=0)
        y = torch.sqrt(y + 1e-08)
        y = torch.mean(y, dim=[2, 3, 4], keepdim=True)
        y = torch.mean(y, dim=2)
        y = y.repeat(group_size, 1, h, w)
        return torch.cat([x, y], 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
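A quick sanity sketch of the layer above, assuming only the class definition: with group_size=4 and a (4, 4, 4, 4) input there is a single group, every reduction collapses to one scalar, and the appended fifth channel is that scalar broadcast over the batch and spatial dimensions — exactly what the fused Triton kernel writes into the strided (4, 5, 4, 4) output buffer:

import torch

layer = Minibatch_stddev_layer(group_size=4, num_new_features=1)
x = torch.rand(4, 4, 4, 4)
y = layer(x)

assert y.shape == (4, 5, 4, 4)
assert torch.equal(y[:, :4], x)                # input channels pass through unchanged
assert torch.allclose(y[:, 4], y[0, 4, 0, 0])  # the new channel is a single constant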
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (64 + r0), None) tmp3 = tl.load(in_ptr0 + (128 + r0), None) tmp5 = tl.load(in_ptr0 + (192 + r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-08 tmp22 = tmp20 + tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.sum(tmp24, 1)[:, None] tmp27 = 64.0 tmp28 = tmp26 / tmp27 tmp29 = 1.0 tmp30 = tmp28 / tmp29 tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]), tmp30, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) get_raw_stream(0) triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf3, class Minibatch_stddev_layerNew(nn.Module): """ Minibatch standard deviation layer. (D_stylegan2) """ def __init__(self, group_size=4, num_new_features=1): super().__init__() self.group_size = group_size self.num_new_features = num_new_features def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Iceland-Leo/StyleGAN2_PyTorch
Minibatch_stddev_layer
false
5,322
[ "MIT" ]
1
3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
https://github.com/Iceland-Leo/StyleGAN2_PyTorch/tree/3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Minibatch standard deviation layer. (D_stylegan2)
    """

    def __init__(self, group_size=4, num_new_features=1):
        super().__init__()
        self.group_size = group_size
        self.num_new_features = num_new_features

    def forward(self, x):
        n, c, h, w = x.shape
        group_size = min(n, self.group_size)
        y = x.view(group_size, -1, self.num_new_features,
                   c // self.num_new_features, h, w)
        y = y - torch.mean(y, dim=0, keepdim=True)
        y = torch.mean(y ** 2, dim=0)
        y = torch.sqrt(y + 1e-08)
        y = torch.mean(y, dim=[2, 3, 4], keepdim=True)
        y = torch.mean(y, dim=2)
        y = y.repeat(group_size, 1, h, w)
        return torch.cat([x, y], 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
ToRGB
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/t6/ct6tjcg37hwxssa3rmolxu36szytdluhb36zohxf24euvezqpnz3.py # Topologically 
Sorted Source Nodes: [weights_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # weights_1 => mul_2 # Graph fragment: # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %unsqueeze_2), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5p/c5pdwxevzmislt5podm7lpbb7zxuwyge2eqknbi7uguxltgxojmi.py # Topologically Sorted Source Nodes: [add, leaky_relu], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add => add # leaky_relu => gt, mul_3, where # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %unsqueeze_6), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul_3), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_2 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.2 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tmp9 = tmp8 > tmp4 tl.store(in_out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [style], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_1], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_5, buf1, buf2, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 12, 4, 4), (192, 16, 4, 1)) buf4 = reinterpret_tensor(buf3, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf3 # reuse buf5 = 
empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, leaky_relu], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_2.run(buf4, primals_6, buf5, 192, grid=grid(192), stream=stream0) del primals_6 return (buf4, primals_3, primals_5, buf1, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((3, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from typing import List
import torch.autograd


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.

    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this
    doesn't have an effect since optimizers such as Adam normalize them by a
    running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate
    $\\lambda$. But the effective weights $w$ get updated proportionately to
    $c \\lambda$. Without equalized learning rate, the effective weights will get
    updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight
    parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights]($equalized_weights) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and
    demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int',
                 demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag for whether to normalize the weights by their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is a style-based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)
        x = F.conv2d(x, weights, padding=self.padding, groups=b)
        return x.reshape(-1, self.out_features, h, w)


class ToRGB(nn.Module):
    """
    <a id="to_rgb"></a>

    ### To RGB

    ![To RGB](to_rgb.svg)

    *<small>$A$ denotes a linear layer.</small>*

    Generates an RGB image from a feature map using a $1 \\times 1$ convolution.
    """

    def __init__(self, d_latent: 'int', features: 'int'):
        """
        * `d_latent` is the dimensionality of $w$
        * `features` is the number of features in the feature map
        """
        super().__init__()
        self.to_style = EqualizedLinear(d_latent, features, bias=1.0)
        self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1))
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `w` is $w$ with shape `[batch_size, d_latent]`
        """
        style = self.to_style(w)
        x = self.conv(x, style)
        return self.activation(x + self.bias[None, :, None, None])


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_latent': 4, 'features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.2 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tmp9 = tmp8 > tmp4 tl.store(in_out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch. 
float32) triton_poi_fused_mul_1[grid(48)](primals_5, buf1, buf2, 48, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf3, (1, 12, 4, 4), (192, 16, 4, 1)) buf4 = reinterpret_tensor(buf3, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf3 buf5 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_2[grid(192)](buf4, primals_6, buf5, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf4, primals_3, primals_5, buf1, reinterpret_tensor(primals_4, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf2, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf5 class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights]($equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class ToRGBNew(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) *<small>$A$ denotes a linear layer.</small>* Generates an RGB image from a feature map using $1 imes 1$ convolution. """ def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(1)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, input_0, input_1): primals_6 = self.bias primals_2 = self.to_style.bias primals_1 = self.to_style.weight.weight primals_5 = self.conv.weight.weight primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
HubBucket-Team/annotated_deep_learning_paper_implementations
ToRGB
false
5,323
[ "MIT" ]
1
4a9716b01e336c57739dfdbdd90648276b53c433
https://github.com/HubBucket-Team/annotated_deep_learning_paper_implementations/tree/4a9716b01e336c57739dfdbdd90648276b53c433
import math import torch import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import List import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an affect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights]($equalized_weights) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates by normalizing it. """ def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) # ... truncated (>4000 chars) for memory efficiency
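The EqualizedWeight docstring above describes the scaling in prose; a small numeric sketch of it, assuming nothing beyond stock PyTorch: parameters are stored as N(0, 1) and multiplied by c = 1/sqrt(fan_in) at use time, so the effective weights start with standard deviation 1/sqrt(fan_in) while the optimizer only ever sees the unit-variance parameter.

import math
import torch

fan_in = 512
stored = torch.randn(1024, fan_in)  # \hat{w} ~ N(0, 1), as stored by EqualizedWeight
c = 1 / math.sqrt(fan_in)
effective = stored * c              # w = c * \hat{w}, computed in forward()
print(effective.std())              # ~= 1 / sqrt(512) ~= 0.044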
Head
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oz/cozqxlcluuaqzreyfue6z5fkzxjeuuwqcorl77txds26n7irqcna.py # Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # h => convolution # h_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 64), (64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h, h_1], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (4, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf3) return (buf3, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (4, 64), (64, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Conv(nn.Module):

    def __init__(self, filters0, filters1, kernel_size, bn, bias=True):
        super().__init__()
        if bn:
            bias = False
        self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1,
                              padding=kernel_size // 2, bias=bias)
        self.bn = nn.BatchNorm2d(filters1) if bn else None

    def forward(self, x):
        h = self.conv(x)
        if self.bn is not None:
            h = self.bn(h)
        return h


class Head(nn.Module):

    def __init__(self, input_size, out_filters, outputs):
        super().__init__()
        self.board_size = input_size[1] * input_size[2]
        self.out_filters = out_filters
        self.conv = Conv(input_size[0], out_filters, 1, bn=False)
        self.activation = nn.LeakyReLU(0.1)
        self.fc = nn.Linear(self.board_size * out_filters, outputs, bias=False)

    def forward(self, x):
        h = self.activation(self.conv(x))
        h = self.fc(h.view(-1, self.board_size * self.out_filters))
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': [4, 4, 4], 'out_filters': 4, 'outputs': 4}]
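A parity sketch for this pair, assuming the eager Head above and the HeadNew wrapper below are both in scope and a CUDA device is available (no dropout or batch norm is involved, so no eval() toggle is needed):

import torch

eager = Head(input_size=[4, 4, 4], out_filters=4, outputs=4).cuda()
compiled = HeadNew(input_size=[4, 4, 4], out_filters=4, outputs=4).cuda()
compiled.load_state_dict(eager.state_dict())  # conv.conv.* and fc.weight line up

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(x), eager(x))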
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.1 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 64), (64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (4, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf3) return buf3, primals_1, primals_3, buf1, reinterpret_tensor(buf2, (4, 64), (64, 1), 0), primals_4 class Conv(nn.Module): def __init__(self, filters0, filters1, kernel_size, bn, bias=True): super().__init__() if bn: bias = False self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1, padding=kernel_size // 2, bias=bias) self.bn = nn.BatchNorm2d(filters1) if bn else None def forward(self, x): h = self.conv(x) if self.bn is not None: h = self.bn(h) return h class HeadNew(nn.Module): def __init__(self, input_size, out_filters, outputs): super().__init__() self.board_size = input_size[1] * input_size[2] self.out_filters = out_filters self.conv = Conv(input_size[0], out_filters, 1, bn=False) self.activation = nn.LeakyReLU(0.1) self.fc = nn.Linear(self.board_size * out_filters, outputs, bias=False) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_4 = self.fc.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Hiroki9759/HandyRL
Head
false
5324
[ "MIT" ]
1
7d4dc869ba2f657d65fc461be4bed2d90dd0343b
https://github.com/Hiroki9759/HandyRL/tree/7d4dc869ba2f657d65fc461be4bed2d90dd0343b
import torch import torch.nn as nn class Conv(nn.Module): def __init__(self, filters0, filters1, kernel_size, bn, bias=True): super().__init__() if bn: bias = False self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1, padding=kernel_size // 2, bias=bias) self.bn = nn.BatchNorm2d(filters1) if bn else None def forward(self, x): h = self.conv(x) if self.bn is not None: h = self.bn(h) return h class Model(nn.Module): def __init__(self, input_size, out_filters, outputs): super().__init__() self.board_size = input_size[1] * input_size[2] self.out_filters = out_filters self.conv = Conv(input_size[0], out_filters, 1, bn=False) self.activation = nn.LeakyReLU(0.1) self.fc = nn.Linear(self.board_size * out_filters, outputs, bias=False) def forward(self, x): h = self.activation(self.conv(x)) h = self.fc(h.view(-1, self.board_size * self.out_filters)) return h def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[4, 4, 4], 4, 4]
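A hedged equivalence check between the eager module (Head in this record's python_code) and the Inductor wrapper HeadNew (my addition; it assumes a CUDA device, since call() pins its buffers to cuda:0, and it copies the parameters so both modules start from identical weights):

import torch

eager = Head(input_size=[4, 4, 4], out_filters=4, outputs=4).cuda()
compiled = HeadNew(input_size=[4, 4, 4], out_filters=4, outputs=4).cuda()
compiled.load_state_dict(eager.state_dict())   # identical conv/fc weights
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)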
Encoding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yx/cyx5u6kg47bcb2a4mvrlkw5ynd42h4mj76hk2j6tveptehbkmic4.py # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, scaled_l2_norm], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] # Source node to ATen node mapping: # pow_1 => pow_1 # scaled_l2_norm => mul # sub => sub # sum_1 => sum_1 # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %view_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sum_1), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp4 + tmp8 tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp9 + tmp13 tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp0 * tmp19 tl.store(out_ptr0 + (x4), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [assigment_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # assigment_weights => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [2], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel 
= 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [assigment_weights], Original ATen: [aten._softmax] # Source node to ATen node mapping: # assigment_weights => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/36/c36bunrpokrzs5svt4e6kwmfyyitmjh7nivu5rc6sidx55znumrf.py # Topologically Sorted Source Nodes: [sub, 
mul_1, encoded_feat], Original ATen: [aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # encoded_feat => sum_3 # mul_1 => mul_1 # sub => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %view_2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_2, %sub), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) triton_per_fused_mul_sub_sum_3 = async_compile.triton('triton_per_fused_mul_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x1 = (xindex // 4) % 4 x2 = (xindex // 16) x0 = xindex % 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*r3) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r3 + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tl.store(out_ptr0 + (x5), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, sum_1, scaled_l2_norm], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul] stream0 
= get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(primals_3, primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [assigment_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [assigment_weights], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, mul_1, encoded_feat], Original ATen: [aten.sub, aten.mul, aten.sum] triton_per_fused_mul_sub_sum_3.run(buf2, primals_1, primals_2, buf3, 64, 16, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization class Encoding(nn.Module): """Encoding Layer: a learnable residual encoder. Input is of shape (batch_size, channels, height, width). Output is of shape (batch_size, num_codes, channels). Args: channels: dimension of the features or feature channels num_codes: number of code words """ def __init__(self, channels, num_codes): super(Encoding, self).__init__() self.channels, self.num_codes = channels, num_codes std = 1.0 / (num_codes * channels) ** 0.5 self.codewords = nn.Parameter(torch.empty(num_codes, channels, dtype=torch.float).uniform_(-std, std), requires_grad=True) self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float) .uniform_(-1, 0), requires_grad=True) @staticmethod def scaled_l2(x, codewords, scale): num_codes, channels = codewords.size() batch_size = x.size(0) reshaped_scale = scale.view((1, 1, num_codes)) expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels)) reshaped_codewords = codewords.view((1, 1, num_codes, channels)) scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords ).pow(2).sum(dim=3) return scaled_l2_norm @staticmethod def aggregate(assigment_weights, x, codewords): num_codes, channels = codewords.size() reshaped_codewords = codewords.view((1, 1, num_codes, channels)) batch_size = x.size(0) expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels)) encoded_feat = (assigment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1) return encoded_feat def forward(self, x): assert x.dim() == 4 and x.size(1) == self.channels batch_size = x.size(0) x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous() assigment_weights = F.softmax(self.scaled_l2(x, self.codewords, self.scale), dim=2) encoded_feat = self.aggregate(assigment_weights, x, self.codewords) return encoded_feat def __repr__(self): repr_str = self.__class__.__name__ repr_str += ( f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})') return repr_str def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'num_codes': 4}]
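Restating what scaled_l2 and aggregate above compute (the same operations as the code, written in the docstring's notation with descriptors x_i, codewords c_k, and learnable scales s_k):

a_{ik} = \frac{\exp\big(s_k \,\lVert x_i - c_k \rVert^2\big)}{\sum_{j=1}^{K} \exp\big(s_j \,\lVert x_i - c_j \rVert^2\big)},
\qquad
e_k = \sum_{i=1}^{N} a_{ik}\,(x_i - c_k)

Note that self.scale is initialized uniformly in (-1, 0), so each s_k starts negative and the softmax behaves like a softmin over distances: descriptors are assigned mostly to nearby codewords.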
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 - tmp2 tmp4 = tmp3 * tmp3 tmp7 = tmp5 - tmp6 tmp8 = tmp7 * tmp7 tmp9 = tmp4 + tmp8 tmp12 = tmp10 - tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp9 + tmp13 tmp17 = tmp15 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp14 + tmp18 tmp20 = tmp0 * tmp19 tl.store(out_ptr0 + x4, tmp20, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, 
:] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x1 = xindex // 4 % 4 x2 = xindex // 16 x0 = xindex % 4 x4 = xindex % 16 x5 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * r3 + 64 * x2), xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (r3 + 16 * x0 + 64 * x2), xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp4 = tmp0 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tl.store(out_ptr0 + x5, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0[grid(256)](primals_3, primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_2[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_per_fused_mul_sub_sum_3[grid(64)](buf2, primals_1, primals_2, buf3, 64, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf3, primals_1, primals_2, primals_3 class EncodingNew(nn.Module): """Encoding Layer: a learnable residual encoder. Input is of shape (batch_size, channels, height, width). Output is of shape (batch_size, num_codes, channels). Args: channels: dimension of the features or feature channels num_codes: number of code words """ def __init__(self, channels, num_codes): super(EncodingNew, self).__init__() self.channels, self.num_codes = channels, num_codes std = 1.0 / (num_codes * channels) ** 0.5 self.codewords = nn.Parameter(torch.empty(num_codes, channels, dtype=torch.float).uniform_(-std, std), requires_grad=True) self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float) .uniform_(-1, 0), requires_grad=True) @staticmethod def scaled_l2(x, codewords, scale): num_codes, channels = codewords.size() batch_size = x.size(0) reshaped_scale = scale.view((1, 1, num_codes)) expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels)) reshaped_codewords = codewords.view((1, 1, num_codes, channels)) scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords ).pow(2).sum(dim=3) return scaled_l2_norm @staticmethod def aggregate(assigment_weights, x, codewords): num_codes, channels = codewords.size() reshaped_codewords = codewords.view((1, 1, num_codes, channels)) batch_size = x.size(0) expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels)) encoded_feat = (assigment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1) return encoded_feat def __repr__(self): repr_str = self.__class__.__name__ repr_str += ( f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})') return repr_str def forward(self, input_0): primals_2 = self.codewords primals_3 = self.scale primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
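triton_poi_fused__softmax_1 and triton_poi_fused__softmax_2 above are the standard two-pass, max-shifted softmax over the code dimension; a plain-PyTorch restatement (a sketch, function name mine):

import torch

def softmax_over_codes(scores: torch.Tensor) -> torch.Tensor:
    # Pass 1 (kernel _softmax_1): subtract the per-row max and exponentiate,
    # which keeps exp() from overflowing.
    e = (scores - scores.amax(dim=2, keepdim=True)).exp()
    # Pass 2 (kernel _softmax_2): normalize by the per-row sum.
    return e / e.sum(dim=2, keepdim=True)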
HusterRC/mmsegmentation
Encoding
false
5325
[ "Apache-2.0" ]
1
c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
https://github.com/HusterRC/mmsegmentation/tree/c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
import torch import torch.nn.functional as F import torch.nn as nn import torch._C import torch.serialization class Model(nn.Module): """Encoding Layer: a learnable residual encoder. Input is of shape (batch_size, channels, height, width). Output is of shape (batch_size, num_codes, channels). Args: channels: dimension of the features or feature channels num_codes: number of code words """ def __init__(self, channels, num_codes): super().__init__() self.channels, self.num_codes = channels, num_codes std = 1.0 / (num_codes * channels) ** 0.5 self.codewords = nn.Parameter(torch.empty(num_codes, channels, dtype=torch.float).uniform_(-std, std), requires_grad=True) self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float) .uniform_(-1, 0), requires_grad=True) @staticmethod def scaled_l2(x, codewords, scale): num_codes, channels = codewords.size() batch_size = x.size(0) reshaped_scale = scale.view((1, 1, num_codes)) expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels)) reshaped_codewords = codewords.view((1, 1, num_codes, channels)) scaled_l2_norm = reshaped_scale * (expanded_x - reshaped_codewords ).pow(2).sum(dim=3) return scaled_l2_norm @staticmethod def aggregate(assigment_weights, x, codewords): num_codes, channels = codewords.size() reshaped_codewords = codewords.view((1, 1, num_codes, channels)) batch_size = x.size(0) expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels)) encoded_feat = (assigment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1) return encoded_feat def forward(self, x): assert x.dim() == 4 and x.size(1) == self.channels batch_size = x.size(0) x = x.view(batch_size, self.channels, -1).transpose(1, 2).contiguous() assigment_weights = F.softmax(self.scaled_l2(x, self.codewords, self.scale), dim=2) encoded_feat = self.aggregate(assigment_weights, x, self.codewords) return encoded_feat def __repr__(self): repr_str = self.__class__.__name__ repr_str += ( f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})') return repr_str def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
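A minimal shape check for this record (my addition; the shapes follow the class docstring): a (batch, channels, H, W) input yields a (batch, num_codes, channels) tensor of aggregated residuals.

import torch

enc = Model(channels=4, num_codes=4)   # the eager class defined just above
x = torch.rand(4, 4, 4, 4)             # (batch, channels, H, W)
out = enc(x)
assert out.shape == (4, 4, 4)          # (batch, num_codes, channels)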
ExampleBackbone
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ft/cftqaeqt35oge5l3bbpv3uhleqvp2lsejqwbjdklod7sy6k66dz2.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 3 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (3, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 46128, grid=grid(46128), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
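Where the hard-coded xnumel = 46128 in the kernel above comes from (a worked check, not new behavior): an unpadded 3x3 convolution shrinks 64x64 inputs to 62x62, and the fused bias-add then touches every element of the (4, 3, 62, 62) output.

# Valid (unpadded) convolution: H_out = H - k + 1 = 64 - 3 + 1 = 62
n, c, k, h = 4, 3, 3, 64
h_out = h - k + 1
assert n * c * h_out * h_out == 46128   # matches xnumel and grid(46128)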
import torch import torch.nn as nn import torch._C import torch.serialization class ExampleBackbone(nn.Module): def __init__(self): super(ExampleBackbone, self).__init__() self.conv = nn.Conv2d(3, 3, 3) def init_weights(self, pretrained=None): pass def forward(self, x): return [self.conv(x)] def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 46128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 3 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (3, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (3,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 3, 62, 62), (11532, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(46128)](buf1, primals_2, 46128, XBLOCK=512, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class ExampleBackboneNew(nn.Module): def __init__(self): super(ExampleBackboneNew, self).__init__() self.conv = nn.Conv2d(3, 3, 3) def init_weights(self, pretrained=None): pass def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
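The one nontrivial index in the cleaned kernel above is x1 = xindex // 3844 % 3: in a contiguous (N, C, H, W) layout each channel plane holds 62 * 62 = 3844 elements, so this recovers the channel id used to broadcast the bias. A tiny sketch of the decode (function name mine):

# Decode the channel from a flat index into a contiguous (4, 3, 62, 62) tensor.
def channel_of(flat_index: int) -> int:
    return flat_index // (62 * 62) % 3

assert channel_of(0) == 0 and channel_of(3844) == 1 and channel_of(2 * 3844) == 2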
HusterRC/mmsegmentation
ExampleBackbone
false
5326
[ "Apache-2.0" ]
1
c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
https://github.com/HusterRC/mmsegmentation/tree/c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
import torch import torch.nn as nn import torch._C import torch.serialization class Model(nn.Module): def __init__(self): super().__init__() self.conv = nn.Conv2d(3, 3, 3) def init_weights(self, pretrained=None): pass def forward(self, x): return [self.conv(x)] def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return []
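A minimal forward check (my addition): the module returns its single feature map in a list, as backbones in this codebase conventionally return one tensor per stage.

import torch

m = Model()
[y] = m(torch.rand(4, 3, 64, 64))
assert y.shape == (4, 3, 62, 62)   # unpadded 3x3 conv: 64 -> 62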
FCUDown
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xr/cxrxkhknnmqboqvb3m3mjmm4siolsh3ku22r7a466e4aw3hotnhv.py # Topologically Sorted Source Nodes: [x, avg_pool2d], Original ATen: [aten.convolution, aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%squeeze, [1, 1], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_convolution_0 = async_compile.triton('triton_poi_fused_avg_pool2d_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_convolution_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rm/crmilvqoo2zvjnggsrmfaoq5nbufrq2d66z26b5m33is7l7oj4de.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_2 => add, clone, rsqrt, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 
tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sz/cszqoetc5h77ez4a7xxqisjxr7wumihc5w3yeytxqfsyjswutuy2.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # x_2 => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), 
xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bw/cbwtlqzhq6a5mzatoyulbqvr3fm6cit6icib4drikbzdamoa2b23.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_4 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %mul_4], 1), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 5 x0 = xindex % 4 x2 = (xindex // 20) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x0) + (16*x2) + ((-1) + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = 0.5 tmp11 = tmp9 * tmp10 tmp12 = 0.7071067811865476 tmp13 = tmp9 * tmp12 tmp14 = libdevice.erf(tmp13) tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tmp11 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + (x3), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) 
with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, avg_pool2d], Original ATen: [aten.convolution, aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_convolution_0.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_2 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf2, buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf2, buf3, buf4, primals_4, primals_5, buf5, 64, grid=grid(64), stream=stream0) del buf3 del buf4 buf6 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(primals_6, buf5, buf6, 80, grid=grid(80), stream=stream0) del buf5 del primals_6 return (buf6, primals_1, primals_4, primals_5, reinterpret_tensor(primals_3, (1, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
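The two native_layer_norm kernels in this record split LayerNorm into a statistics pass (kernel _1: per-row mean, plus rsqrt of the biased variance with eps = 1e-06) and an apply pass (kernel _2: normalize, then scale and shift). A plain-PyTorch restatement (a sketch, function name mine):

import torch

def layer_norm_two_pass(x, weight, bias, eps=1e-06):
    mu = x.mean(dim=-1, keepdim=True)                                      # kernel _1
    inv = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + eps)   # kernel _1
    return (x - mu) * inv * weight + bias                                  # kernel _2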
import torch import torch.nn as nn from functools import partial class FCUDown(nn.Module): """ CNN feature maps -> Transformer patch embeddings """ def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06)): super(FCUDown, self).__init__() self.dw_stride = dw_stride self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0) self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride= dw_stride) self.ln = norm_layer(outplanes) self.act = act_layer() def forward(self, x, x_t): x = self.conv_project(x) x = self.sample_pooling(x).flatten(2).transpose(1, 2) x = self.ln(x) x = self.act(x) x = torch.cat([x_t[:, 0][:, None, :], x], dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'inplanes': 4, 'outplanes': 4, 'dw_stride': 1}]
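A hedged shape trace for FCUDown (my addition; the sizes below are illustrative, not from the record, and a batched 4-D feature map is assumed as in the docstring, whereas the record's toy harness feeds 3-D tensors):

import torch

m = FCUDown(inplanes=8, outplanes=16, dw_stride=2)
x = torch.rand(2, 8, 8, 8)        # CNN feature map (B, C, H, W)
x_t = torch.rand(2, 17, 16)       # transformer tokens; x_t[:, 0] is the cls token
out = m(x, x_t)                   # 1x1 conv -> 2x2 avg pool -> (B, 16, 16) patches -> LN -> GELU
assert out.shape == (2, 17, 16)   # (8/2) * (8/2) = 16 patches plus the prepended cls token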
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from functools import partial assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_convolution_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 80 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 5 x0 = xindex % 4 x2 = xindex // 20 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x0 + 16 * x2 + (-1 + x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = 0.5 tmp11 = tmp9 * tmp10 tmp12 = 0.7071067811865476 tmp13 = tmp9 * tmp12 tmp14 = libdevice.erf(tmp13) tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = tmp11 * tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp6, tmp17, tmp18) tmp20 = tl.where(tmp4, tmp5, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, stride=(1, 1), padding =(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_convolution_0[grid(64)](buf1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) triton_poi_fused_native_layer_norm_2[grid(64)](buf2, buf3, buf4, primals_4, primals_5, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del buf4 buf6 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32) triton_poi_fused_cat_3[grid(80)](primals_6, buf5, buf6, 80, XBLOCK= 128, num_warps=4, num_stages=1) del buf5 del primals_6 return buf6, primals_1, primals_4, primals_5, reinterpret_tensor(primals_3, (1, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4 ), (16, 4, 1), 0), buf2 class FCUDownNew(nn.Module): """ CNN feature maps -> Transformer patch embeddings """ def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06)): super(FCUDownNew, self).__init__() self.dw_stride = dw_stride self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0) self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride= dw_stride) self.ln = norm_layer(outplanes) self.act = act_layer() def forward(self, input_0, input_1): primals_1 = self.conv_project.weight primals_2 = self.conv_project.bias primals_4 = self.ln.weight primals_5 = self.ln.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Huzhen757/Conformer
FCUDown
false
5,327
[ "Apache-2.0" ]
1
4f7a80cec28b9ced8c0225a85a32997f7cd2b93c
https://github.com/Huzhen757/Conformer/tree/4f7a80cec28b9ced8c0225a85a32997f7cd2b93c
import torch import torch.nn as nn from functools import partial class Model(nn.Module): """ CNN feature maps -> Transformer patch embeddings """ def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06)): super().__init__() self.dw_stride = dw_stride self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0) self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride= dw_stride) self.ln = norm_layer(outplanes) self.act = act_layer() def forward(self, x, x_t): x = self.conv_project(x) x = self.sample_pooling(x).flatten(2).transpose(1, 2) x = self.ln(x) x = self.act(x) x = torch.cat([x_t[:, 0][:, None, :], x], dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4, 1]
PPMConcat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/cr/ccrgimd5zqak747hzrbdpprnae5dbx4vetggrn46afu3ejbaeqzr.py # Topologically Sorted Source Nodes: [ppm_out, concat_outs], Original ATen: [aten.mean, aten.cat] # Source node to ATen node mapping: # concat_outs => cat # ppm_out => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {}) triton_per_fused_cat_mean_0 = async_compile.triton('triton_per_fused_cat_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_mean_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr1 + (110*x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/b7/cb7webixgun5kq7klyyw3pye6ybqszrjc476b25fx2hkpqtlyz4c.py # Topologically Sorted Source Nodes: [ppm_out_1, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat] # Source node to ATen node mapping: # concat_outs => cat # ppm_out_1 => _adaptive_avg_pool2d # Graph fragment: # %_adaptive_avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [3, 3]), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {}) triton_poi_fused__adaptive_avg_pool2d_cat_1 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_cat_1(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 3) % 3 x0 = xindex % 3 x2 = (xindex // 9) x5 = xindex x3 = xindex % 9 tmp0 = ((4*x1) // 3) tmp1 = 2 + ((4*x1) // 3) tmp2 = tmp0 < tmp1 tmp3 = ((4*x0) // 3) tmp4 = 2 + ((4*x0) // 3) tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + ((4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp6 & xmask, other=0.0) tmp8 = 1 + ((4*x0) // 3) tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp10 & xmask, other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + ((4*x1) // 3) tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp15 & xmask, other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 
(4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp18 & xmask, other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr1 + (x3 + (110*x2)), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wp/cwp4azliwtvpgqegpgjubymmobqvhael5uz7meise5e3joe5bqu2.py # Topologically Sorted Source Nodes: [ppm_out_2, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat] # Source node to ATen node mapping: # concat_outs => cat # ppm_out_2 => _adaptive_avg_pool2d_1 # Graph fragment: # %_adaptive_avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [6, 6]), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {}) triton_poi_fused__adaptive_avg_pool2d_cat_2 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_cat_2(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x5 = xindex x3 = xindex % 36 tmp0 = ((2*x1) // 3) tmp1 = ((9 + (4*x1)) // 6) tmp2 = tmp0 < tmp1 tmp3 = ((2*x0) // 3) tmp4 = ((9 + (4*x0)) // 6) tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + ((4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + ((2*x0) // 3) tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + ((2*x1) // 3) tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 
3)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr1 + (x3 + (110*x2)), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nd/cnd5jniex5euxalox74lnevxasjts2znoosbfhhqne7m2q47peko.py # Topologically Sorted Source Nodes: [ppm_out_3, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat] # Source node to ATen node mapping: # concat_outs => cat # ppm_out_3 => _adaptive_avg_pool2d_2 # Graph fragment: # %_adaptive_avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [8, 8]), kwargs = {}) # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3], 2), kwargs = {}) triton_poi_fused__adaptive_avg_pool2d_cat_3 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_cat_3(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x5 = xindex x3 = xindex % 64 tmp0 = (x1 // 2) tmp1 = ((11 + (4*x1)) // 8) tmp2 = tmp0 < tmp1 tmp3 = (x0 // 2) tmp4 = ((11 + (4*x0)) // 8) tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + (x0 // 2) tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + (4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 
tmp13 = 1 + (x1 // 2) tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + (4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + (4*(x1 // 2)) + (16*x2) + (x0 // 2)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr1 + (x3 + (110*x2)), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf8 = empty_strided_cuda((4, 4, 110), (440, 110, 1), torch.float32) buf4 = reinterpret_tensor(buf8, (4, 4, 1), (440, 110, 1), 0) # alias # Topologically Sorted Source Nodes: [ppm_out, concat_outs], Original ATen: [aten.mean, aten.cat] stream0 = get_raw_stream(0) triton_per_fused_cat_mean_0.run(arg0_1, buf4, 16, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf8, (4, 4, 9), (440, 110, 1), 1) # alias # Topologically Sorted Source Nodes: [ppm_out_1, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat] triton_poi_fused__adaptive_avg_pool2d_cat_1.run(arg0_1, buf5, 144, grid=grid(144), stream=stream0) buf6 = reinterpret_tensor(buf8, (4, 4, 36), (440, 110, 1), 10) # alias # Topologically Sorted Source Nodes: [ppm_out_2, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat] triton_poi_fused__adaptive_avg_pool2d_cat_2.run(arg0_1, buf6, 576, grid=grid(576), stream=stream0) buf7 = reinterpret_tensor(buf8, (4, 4, 64), (440, 110, 1), 46) # alias # Topologically Sorted Source Nodes: [ppm_out_3, concat_outs], Original ATen: [aten._adaptive_avg_pool2d, aten.cat] triton_poi_fused__adaptive_avg_pool2d_cat_3.run(arg0_1, buf7, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._C import torch.serialization class PPMConcat(nn.ModuleList): """Pyramid Pooling Module that only concat the features of each layer. Args: pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid Module. """ def __init__(self, pool_scales=(1, 3, 6, 8)): super(PPMConcat, self).__init__([nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales]) def forward(self, feats): """Forward function.""" ppm_outs = [] for ppm in self: ppm_out = ppm(feats) ppm_outs.append(ppm_out.view(*feats.shape[:2], -1)) concat_outs = torch.cat(ppm_outs, dim=2) return concat_outs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_mean_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr1 + 110 * x0, tmp6, xmask) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_cat_1(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 3 % 3 x0 = xindex % 3 x2 = xindex // 9 x3 = xindex % 9 tmp0 = 4 * x1 // 3 tmp1 = 2 + 4 * x1 // 3 tmp2 = tmp0 < tmp1 tmp3 = 4 * x0 // 3 tmp4 = 2 + 4 * x0 // 3 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3), tmp6 & xmask, other=0.0) tmp8 = 1 + 4 * x0 // 3 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3), tmp10 & xmask, other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + 4 * x1 // 3 tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3), tmp15 & xmask, other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3), tmp18 & xmask, other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_cat_2(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x3 = xindex % 36 tmp0 = 2 * x1 // 3 tmp1 = (9 + 4 * x1) // 6 tmp2 = tmp0 < tmp1 tmp3 = 2 * x0 // 3 tmp4 = (9 + 4 * x0) // 6 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + 2 * x0 // 3 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + 2 * x1 // 3 tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3), tmp18 & xmask, 
eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask) @triton.jit def triton_poi_fused__adaptive_avg_pool2d_cat_3(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x3 = xindex % 64 tmp0 = x1 // 2 tmp1 = (11 + 4 * x1) // 8 tmp2 = tmp0 < tmp1 tmp3 = x0 // 2 tmp4 = (11 + 4 * x0) // 8 tmp5 = tmp3 < tmp4 tmp6 = tmp2 & tmp5 tmp7 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * x2 + x0 // 2), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = 1 + x0 // 2 tmp9 = tmp8 < tmp4 tmp10 = tmp2 & tmp9 tmp11 = tl.load(in_ptr0 + (1 + 4 * (x1 // 2) + 16 * x2 + x0 // 2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tmp11 + tmp7 tmp13 = 1 + x1 // 2 tmp14 = tmp13 < tmp1 tmp15 = tmp14 & tmp5 tmp16 = tl.load(in_ptr0 + (4 + 4 * (x1 // 2) + 16 * x2 + x0 // 2), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp16 + tmp12 tmp18 = tmp14 & tmp9 tmp19 = tl.load(in_ptr0 + (5 + 4 * (x1 // 2) + 16 * x2 + x0 // 2), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 + tmp17 tmp21 = 1.0 tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp6, tmp21, tmp22) tmp24 = tl.where(tmp10, tmp21, tmp22) tmp25 = tmp24 + tmp23 tmp26 = tl.where(tmp15, tmp21, tmp22) tmp27 = tmp26 + tmp25 tmp28 = tl.where(tmp18, tmp21, tmp22) tmp29 = tmp28 + tmp27 tmp30 = tmp20 / tmp29 tl.store(out_ptr1 + (x3 + 110 * x2), tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf8 = empty_strided_cuda((4, 4, 110), (440, 110, 1), torch.float32) buf4 = reinterpret_tensor(buf8, (4, 4, 1), (440, 110, 1), 0) get_raw_stream(0) triton_per_fused_cat_mean_0[grid(16)](arg0_1, buf4, 16, 16, XBLOCK= 8, num_warps=2, num_stages=1) buf5 = reinterpret_tensor(buf8, (4, 4, 9), (440, 110, 1), 1) triton_poi_fused__adaptive_avg_pool2d_cat_1[grid(144)](arg0_1, buf5, 144, XBLOCK=128, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf8, (4, 4, 36), (440, 110, 1), 10) triton_poi_fused__adaptive_avg_pool2d_cat_2[grid(576)](arg0_1, buf6, 576, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf8, (4, 4, 64), (440, 110, 1), 46) triton_poi_fused__adaptive_avg_pool2d_cat_3[grid(1024)](arg0_1, buf7, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf8, class PPMConcatNew(nn.ModuleList): """Pyramid Pooling Module that only concat the features of each layer. Args: pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid Module. """ def __init__(self, pool_scales=(1, 3, 6, 8)): super(PPMConcatNew, self).__init__([nn.AdaptiveAvgPool2d(pool_scale ) for pool_scale in pool_scales]) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
HusterRC/mmsegmentation
PPMConcat
false
5,328
[ "Apache-2.0" ]
1
c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
https://github.com/HusterRC/mmsegmentation/tree/c3e4dbc2e06de3f47f75098f76772b4ee7e91e35
import torch import torch.nn as nn import torch._C import torch.serialization class Model(nn.ModuleList): """Pyramid Pooling Module that only concat the features of each layer. Args: pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid Module. """ def __init__(self, pool_scales=(1, 3, 6, 8)): super().__init__([nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales]) def forward(self, feats): """Forward function.""" ppm_outs = [] for ppm in self: ppm_out = ppm(feats) ppm_outs.append(ppm_out.view(*feats.shape[:2], -1)) concat_outs = torch.cat(ppm_outs, dim=2) return concat_outs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
BCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5u/c5u7c3xfjaazdtxwasop4c2d3vlwaagqtikdsnpoz56tcxjbvfvx.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.binary_cross_entropy_with_logits] # Source node to ATen node mapping: # loss => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': 
{0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.binary_cross_entropy_with_logits] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional import torch.nn as nn def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image class BCELoss(nn.Module): def __init__(self): super(BCELoss, self).__init__() self.bce = nn.BCEWithLogitsLoss() def forward(self, y_pred, y_true): _n, _ch, h, w = y_pred.size() y_true = centercrop(y_true, w, h) loss = self.bce(y_pred, y_true) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image class BCELossNew(nn.Module): def __init__(self): super(BCELossNew, self).__init__() self.bce = nn.BCEWithLogitsLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
HelenGuohx/cv-ferattn-code
BCELoss
false
5,329
[ "MIT" ]
1
faa9b7850fe2a0f8c08193bb129b5fec4639d616
https://github.com/HelenGuohx/cv-ferattn-code/tree/faa9b7850fe2a0f8c08193bb129b5fec4639d616
import torch import torch.nn.functional import torch.nn as nn def centercrop(image, w, h): _nt, _ct, ht, wt = image.size() padw, padh = (wt - w) // 2, (ht - h) // 2 if padw > 0 and padh > 0: image = image[:, :, padh:-padh, padw:-padw] return image class Model(nn.Module): def __init__(self): super().__init__() self.bce = nn.BCEWithLogitsLoss() def forward(self, y_pred, y_true): _n, _ch, h, w = y_pred.size() y_true = centercrop(y_true, w, h) loss = self.bce(y_pred, y_true) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dm/cdmxr4mhsj4zxs6q6xjyzdhvwtjf7kehpknr6t4nttpashbn2ko6.py # Topologically Sorted Source Nodes: [query_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # query_1 => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2 % 4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_4/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attn_output_weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_output_weights_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [attn_output_weights_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_output_weights_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/um/cumdt56px4jhgi4x7ers5m2jlyr4stfdyfhyb47o43khr5qzdg6f.py # Topologically Sorted Source Nodes: [attn_output_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # attn_output_3 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3l/c3liyh7ylrronjiab5ornyzvenwo4xvajfb7agab2zd3if3ftciq.py # Topologically Sorted Source Nodes: [attn_output_3, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # attn_output_3 => add # src => add_1 # src_1 => var_mean # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/oy/coy5a25ze4ikafr5vfdbseijd2cjm7oj5y2sikvp2wuokeuaobar.py # Topologically Sorted Source Nodes: [attn_output_3, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # attn_output_3 => add # src => add_1 # src_1 => add_2, add_3, mul_1, mul_2, rsqrt, sub_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_9), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_10), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_11), kwargs = {}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ch/cchf4ieswr2fbshzqc7osx3z3h5rid3j2mzzw4nz3fh7yttl3xkf.py # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] # Source node to ATen node mapping: # gelu => add_4, erf, mul_3, mul_4, mul_5 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.5), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_19, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_4,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %add_4), kwargs = {}) triton_poi_fused_gelu_6 = async_compile.triton('triton_poi_fused_gelu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_gelu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5j/c5jq4slay2gzyh6whguivh7mf6amdb7fclvyghyk5gdqukw7fkho.py # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] # Source node to ATen node mapping: # src_2 => add_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_21), kwargs = {}) triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/m7/cm7alyvey7ehr4ipkugewxth4wd7hd3x37adkk5crh4vogw2ddrm.py # Topologically Sorted Source 
Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_6, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4d/c4dqmytly5zolisxgude22db5p3pbommvxualmvn6ercwtoppkm7.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_6, add_7, mul_6, mul_7, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) # %add_6 
: [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_3), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %primals_16), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %primals_17), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) 
assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (2048, 4), (4, 1)) assert_size_stride(primals_13, (2048, ), (1, )) assert_size_stride(primals_14, (4, 2048), (2048, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = reinterpret_tensor(buf0, (16, 4, 1), (1, 16, 64), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [query_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf3, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_output_weights], Original ATen: [aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_output_weights_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [attn_output_weights_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_output], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_output_3], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf7, buf8, 4, 16, grid=grid(4, 16), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attn_output_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [attn_output_3, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_4.run(primals_1, buf9, primals_9, buf10, buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 
4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_output_3, src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf9, primals_9, buf10, buf11, primals_10, primals_11, buf12, 64, grid=grid(64), stream=stream0) del primals_11 buf13 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 2048), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_13 buf14 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu] triton_poi_fused_gelu_6.run(buf13, buf14, 32768, grid=grid(32768), stream=stream0) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_14, (2048, 4), (1, 2048), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] triton_poi_fused_add_7.run(buf16, buf12, primals_15, 64, grid=grid(64), stream=stream0) del primals_15 buf17 = buf11; del buf11 # reuse buf18 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(buf16, buf17, buf18, 16, grid=grid(16), stream=stream0) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(buf16, buf17, buf18, primals_16, primals_17, buf19, 64, grid=grid(64), stream=stream0) del buf17 del buf18 del primals_17 return (buf19, primals_1, primals_9, primals_10, primals_16, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), buf16, primals_14, primals_12, primals_8, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_14 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
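The following driver is an illustrative sketch, not part of the generated file: it shows one way to exercise `call` directly, assuming a CUDA device is available and using the same input shapes that `benchmark_compiled_module` constructs. The helper name `run_compiled_once` is hypothetical. `call` empties its argument list in place and returns a tuple whose first element is the encoder-layer output; the trailing tensors are intermediates kept for the backward pass.

import torch

def run_compiled_once():
    # Shapes of primals_1..primals_17, matching the assert_size_stride
    # checks at the top of call(); contiguous torch.rand tensors satisfy
    # the asserted strides.
    shapes = [(4, 4, 4), (4, 4), (4,), (4, 4), (4,), (4, 4), (4,), (4, 4),
              (4,), (4,), (4,), (2048, 4), (2048,), (4, 2048), (4,), (4,), (4,)]
    args = [torch.rand(s, device='cuda', dtype=torch.float32) for s in shapes]
    out = call(args)  # call() clears `args` as it runs
    return out[0]     # (4, 4, 4) output of the fused encoder layer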
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from typing import Optional import torch.utils.data from typing import Tuple class InProjContainer(torch.nn.Module): def __init__(self, query_proj, key_proj, value_proj): """An in-proj container to project query/key/value in MultiheadAttention. This module happens before reshaping the projected query/key/value into multiple heads. See the linear layers (bottom) of Multi-head Attention in Fig 2 of Attention Is All You Need paper. Also check the usage example in torchtext.nn.MultiheadAttentionContainer. Args: query_proj: a proj layer for query. A typical projection layer is torch.nn.Linear. key_proj: a proj layer for key. A typical projection layer is torch.nn.Linear. value_proj: a proj layer for value. A typical projection layer is torch.nn.Linear. """ super(InProjContainer, self).__init__() self.query_proj = query_proj self.key_proj = key_proj self.value_proj = value_proj def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Projects the input sequences using in-proj layers. query/key/value are simply passed to the forward func of query/key/value_proj, respectively. Args: query (Tensor): The query to be projected. key (Tensor): The keys to be projected. value (Tensor): The values to be projected. Examples:: >>> import torch >>> from torchtext.nn import InProjContainer >>> embed_dim, bsz = 10, 64 >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim)) >>> q = torch.rand((5, bsz, embed_dim)) >>> k = v = torch.rand((6, bsz, embed_dim)) >>> q, k, v = in_proj_container(q, k, v) """ return self.query_proj(query), self.key_proj(key), self.value_proj( value) class MultiheadAttentionContainer(torch.nn.Module): def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False): """ A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the input and output tensors are provided as `(..., N, L, E)`.
Default: ``False`` Examples:: >>> import torch >>> from torchtext.nn import MultiheadAttentionContainer, InProjContainer, ScaledDotProduct >>> embed_dim, num_heads, bsz = 10, 5, 64 >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim)) >>> MHA = MultiheadAttentionContainer(num_heads, in_proj_container, ScaledDotProduct(), torch.nn.Linear(embed_dim, embed_dim)) >>> query = torch.rand((21, bsz, embed_dim)) >>> key = value = torch.rand((16, bsz, embed_dim)) >>> attn_output, attn_weights = MHA(query, key, value) >>> print(attn_output.shape) torch.Size([21, 64, 10]) """ super(MultiheadAttentionContainer, self).__init__() self.nhead = nhead self.in_proj_container = in_proj_container self.attention_layer = attention_layer self.out_proj = out_proj self.batch_first = batch_first def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', attn_mask: 'Optional[torch.Tensor]'=None, bias_k: 'Optional[torch.Tensor]'=None, bias_v: 'Optional[torch.Tensor]'=None ) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: query (Tensor): The query of the attention function. See "Attention Is All You Need" for more details. key (Tensor): The keys of the attention function. See "Attention Is All You Need" for more details. value (Tensor): The values of the attention function. See "Attention Is All You Need" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are used for incremental decoding. Users should also provide ``bias_k``. Shape: - Inputs: - query: :math:`(..., L, N, E)` - key: :math:`(..., S, N, E)` - value: :math:`(..., S, N, E)` - attn_mask, bias_k and bias_v: same as the shape of the corresponding args in the attention layer. - Outputs: - attn_output: :math:`(..., L, N, E)` - attn_output_weights: :math:`(N * H, L, S)` Note: It's optional to have the query/key/value inputs with more than three dimensions (for broadcast purpose). The MultiheadAttentionContainer module will operate on the last three dimensions. where L is the target length, S is the sequence length, H is the number of attention heads, N is the batch size, and E is the embedding dimension.
""" if self.batch_first: query, key, value = query.transpose(-3, -2), key.transpose(-3, -2 ), value.transpose(-3, -2) tgt_len, src_len, bsz, embed_dim = query.size(-3), key.size(-3 ), query.size(-2), query.size(-1) q, k, v = self.in_proj_container(query, key, value) assert q.size(-1 ) % self.nhead == 0, "query's embed_dim must be divisible by the number of heads" head_dim = q.size(-1) // self.nhead q = q.reshape(tgt_len, bsz * self.nhead, head_dim) assert k.size(-1 ) % self.nhead == 0, "key's embed_dim must be divisible by the number of heads" head_dim = k.size(-1) // self.nhead k = k.reshape(src_len, bsz * self.nhead, head_dim) assert v.size(-1 ) % self.nhead == 0, "value's embed_dim must be divisible by the number of heads" head_dim = v.size(-1) // self.nhead v = v.reshape(src_len, bsz * self.nhead, head_dim) attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v) attn_output = attn_output.reshape(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) if self.batch_first: attn_output = attn_output.transpose(-3, -2) return attn_output, attn_output_weights class ScaledDotProduct(torch.nn.Module): def __init__(self, dropout=0.0, batch_first=False): """Processes a projected query and key-value pair to apply scaled dot product attention. Args: dropout (float): probability of dropping an attention weight. batch_first: If ``True``, then the input and output tensors are provided as `(batch, seq, feature)`. Default: ``False`` Examples:: >>> import torch, torchtext >>> SDP = torchtext.nn.ScaledDotProduct(dropout=0.1) >>> q = torch.randn(21, 256, 3) >>> k = v = torch.randn(21, 256, 3) >>> attn_output, attn_weights = SDP(q, k, v) >>> print(attn_output.shape, attn_weights.shape) torch.Size([21, 256, 3]) torch.Size([256, 21, 21]) """ super(ScaledDotProduct, self).__init__() self.dropout = dropout self.batch_first = batch_first def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', attn_mask: 'Optional[torch.Tensor]'=None, bias_k: 'Optional[torch.Tensor]'=None, bias_v: 'Optional[torch.Tensor]'=None ) ->Tuple[torch.Tensor, torch.Tensor]: """Uses a scaled dot product with the projected key-value pair to update the projected query. Args: query (Tensor): Projected query key (Tensor): Projected key value (Tensor): Projected value attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are used for incremental decoding. Users should also provide ``bias_k``. Shape: - query: :math:`(..., L, N * H, E / H)` - key: :math:`(..., S, N * H, E / H)` - value: :math:`(..., S, N * H, E / H)` - attn_mask: :math:`(N * H, L, S)`, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. - bias_k and bias_v: :math:`(1, N * H, E / H)` - Output: :math:`(..., L, N * H, E / H)`, :math:`(N * H, L, S)` Note: It's optional to have the query/key/value inputs with more than three dimensions (for broadcast purpose). The ScaledDotProduct module will operate on the last three dimensions.
where L is the target length, S is the source length, H is the number of attention heads, N is the batch size, and E is the embedding dimension. """ if self.batch_first: query, key, value = query.transpose(-3, -2), key.transpose(-3, -2 ), value.transpose(-3, -2) if bias_k is not None and bias_v is not None: assert key.size(-1) == bias_k.size(-1) and key.size(-2 ) == bias_k.size(-2) and bias_k.size(-3 ) == 1, 'Shape of bias_k is not supported' assert value.size(-1) == bias_v.size(-1) and value.size(-2 ) == bias_v.size(-2) and bias_v.size(-3 ) == 1, 'Shape of bias_v is not supported' key = torch.cat([key, bias_k]) value = torch.cat([value, bias_v]) if attn_mask is not None: attn_mask = torch.nn.functional.pad(attn_mask, (0, 1)) tgt_len, head_dim = query.size(-3), query.size(-1) assert query.size(-1) == key.size(-1) == value.size(-1 ), 'The feature dim of query, key, value must be equal.' assert key.size() == value.size(), 'Shape of key, value must match' src_len = key.size(-3) batch_heads = max(query.size(-2), key.size(-2)) query, key, value = query.transpose(-2, -3), key.transpose(-2, -3 ), value.transpose(-2, -3) query = query * float(head_dim) ** -0.5 if attn_mask is not None: if attn_mask.dim() != 3: raise RuntimeError('attn_mask must be a 3D tensor.') if attn_mask.size(-1) != src_len or attn_mask.size(-2 ) != tgt_len or attn_mask.size(-3) != 1 and attn_mask.size(-3 ) != batch_heads: raise RuntimeError('The size of the attn_mask is not correct.') if attn_mask.dtype != torch.bool: raise RuntimeError( 'Only bool tensor is supported for attn_mask') attn_output_weights = torch.matmul(query, key.transpose(-2, -1)) if attn_mask is not None: attn_output_weights.masked_fill_(attn_mask, -100000000.0) attn_output_weights = torch.nn.functional.softmax(attn_output_weights, dim=-1) attn_output_weights = torch.nn.functional.dropout(attn_output_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_output_weights, value) if self.batch_first: return attn_output, attn_output_weights else: return attn_output.transpose(-3, -2), attn_output_weights class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='gelu'): super(TransformerEncoderLayer, self).__init__() in_proj_container = InProjContainer(Linear(d_model, d_model), Linear(d_model, d_model), Linear(d_model, d_model)) self.mha = MultiheadAttentionContainer(nhead, in_proj_container, ScaledDotProduct(), Linear(d_model, d_model)) self.linear1 = Linear(d_model, dim_feedforward) self.dropout = Dropout(dropout) self.linear2 = Linear(dim_feedforward, d_model) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.dropout1 = Dropout(dropout) self.dropout2 = Dropout(dropout) if activation == 'relu': self.activation = F.relu elif activation == 'gelu': self.activation = F.gelu else: raise RuntimeError('only relu/gelu are supported, not {}'. 
format(activation)) def init_weights(self): self.mha.in_proj_container.query_proj.init_weights() self.mha.in_proj_container.key_proj.init_weights() self.mha.in_proj_container.value_proj.init_weights() self.mha.out_proj.init_weights() self.linear1.weight.data.normal_(mean=0.0, std=0.02) self.linear2.weight.data.normal_(mean=0.0, std=0.02) self.norm1.bias.data.zero_() self.norm1.weight.data.fill_(1.0) self.norm2.bias.data.zero_() self.norm2.weight.data.fill_(1.0) def forward(self, src, src_mask=None, src_key_padding_mask=None): attn_output, _attn_output_weights = self.mha(src, src, src, attn_mask=src_mask) src = src + self.dropout1(attn_output) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
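As a usage note for the reference listing above, the sketch below (the function name is hypothetical) shows how the `get_init_inputs`/`get_inputs` helpers are meant to drive the eager layer; `.eval()` turns the Dropout modules into no-ops so repeated runs are deterministic.

def run_reference():
    init_args, init_kwargs = get_init_inputs()   # [], {'d_model': 4, 'nhead': 4}
    layer = TransformerEncoderLayer(*init_args, **init_kwargs).eval()
    src, = get_inputs()                          # one (4, 4, 4) tensor: (seq, batch, d_model)
    with torch.no_grad():
        return layer(src)                        # output has the same shape as src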
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F from torch.nn import Linear from torch.nn import Dropout from torch.nn import LayerNorm from typing import Optional import torch.utils.data from typing import Tuple assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2 % 4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + 
x0, tmp8, None) @triton.jit def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (2048, 4), (4, 1)) assert_size_stride(primals_13, (2048,), (1,)) assert_size_stride(primals_14, (4, 2048), (2048, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = reinterpret_tensor(buf0, (16, 4, 1), (1, 16, 64), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(64)](buf3, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](primals_1, buf9, primals_9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(64)](primals_1, buf9, primals_9, buf10, buf11, primals_10, primals_11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf13 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 2048), (1, 4), 0 ), alpha=1, beta=1, out=buf13) del primals_13 buf14 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.float32 ) triton_poi_fused_gelu_6[grid(32768)](buf13, buf14, 32768, XBLOCK= 256, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_14, (2048, 4), (1, 2048), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0) del buf15 triton_poi_fused_add_7[grid(64)](buf16, buf12, primals_15, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 buf17 = buf11 del buf11 buf18 = buf10 del buf10 triton_poi_fused_native_layer_norm_8[grid(16)](buf16, buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(64)](buf16, buf17, buf18, primals_16, primals_17, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 del buf18 del primals_17 return (buf19, primals_1, primals_9, primals_10, primals_16, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), buf16, primals_14, primals_12, primals_8, 
reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0)) class InProjContainer(torch.nn.Module): def __init__(self, query_proj, key_proj, value_proj): """An in-proj container to project query/key/value in MultiheadAttention. This module happens before reshaping the projected query/key/value into multiple heads. See the linear layers (bottom) of Multi-head Attention in Fig 2 of Attention Is All You Need paper. Also check the usage example in torchtext.nn.MultiheadAttentionContainer. Args: query_proj: a proj layer for query. A typical projection layer is torch.nn.Linear. key_proj: a proj layer for key. A typical projection layer is torch.nn.Linear. value_proj: a proj layer for value. A typical projection layer is torch.nn.Linear. """ super(InProjContainer, self).__init__() self.query_proj = query_proj self.key_proj = key_proj self.value_proj = value_proj def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Projects the input sequences using in-proj layers. query/key/value are simply passed to the forward func of query/key/value_proj, respectively. Args: query (Tensor): The query to be projected. key (Tensor): The keys to be projected. value (Tensor): The values to be projected. Examples:: >>> import torch >>> from torchtext.nn import InProjContainer >>> embed_dim, bsz = 10, 64 >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim)) >>> q = torch.rand((5, bsz, embed_dim)) >>> k = v = torch.rand((6, bsz, embed_dim)) >>> q, k, v = in_proj_container(q, k, v) """ return self.query_proj(query), self.key_proj(key), self.value_proj( value) class MultiheadAttentionContainer(torch.nn.Module): def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False): """ A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the input and output tensors are provided as `(..., N, L, E)`.
Default: ``False`` Examples:: >>> import torch >>> from torchtext.nn import MultiheadAttentionContainer, InProjContainer, ScaledDotProduct >>> embed_dim, num_heads, bsz = 10, 5, 64 >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim), torch.nn.Linear(embed_dim, embed_dim)) >>> MHA = MultiheadAttentionContainer(num_heads, in_proj_container, ScaledDotProduct(), torch.nn.Linear(embed_dim, embed_dim)) >>> query = torch.rand((21, bsz, embed_dim)) >>> key = value = torch.rand((16, bsz, embed_dim)) >>> attn_output, attn_weights = MHA(query, key, value) >>> print(attn_output.shape) torch.Size([21, 64, 10]) """ super(MultiheadAttentionContainer, self).__init__() self.nhead = nhead self.in_proj_container = in_proj_container self.attention_layer = attention_layer self.out_proj = out_proj self.batch_first = batch_first def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', attn_mask: 'Optional[torch.Tensor]'=None, bias_k: 'Optional[torch.Tensor]'=None, bias_v: 'Optional[torch.Tensor]'=None ) ->Tuple[torch.Tensor, torch.Tensor]: """ Args: query (Tensor): The query of the attention function. See "Attention Is All You Need" for more details. key (Tensor): The keys of the attention function. See "Attention Is All You Need" for more details. value (Tensor): The values of the attention function. See "Attention Is All You Need" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are used for incremental decoding. Users should also provide ``bias_k``. Shape: - Inputs: - query: :math:`(..., L, N, E)` - key: :math:`(..., S, N, E)` - value: :math:`(..., S, N, E)` - attn_mask, bias_k and bias_v: same as the shape of the corresponding args in the attention layer. - Outputs: - attn_output: :math:`(..., L, N, E)` - attn_output_weights: :math:`(N * H, L, S)` Note: It's optional to have the query/key/value inputs with more than three dimensions (for broadcast purpose). The MultiheadAttentionContainer module will operate on the last three dimensions. where L is the target length, S is the sequence length, H is the number of attention heads, N is the batch size, and E is the embedding dimension.
""" if self.batch_first: query, key, value = query.transpose(-3, -2), key.transpose(-3, -2 ), value.transpose(-3, -2) tgt_len, src_len, bsz, embed_dim = query.size(-3), key.size(-3 ), query.size(-2), query.size(-1) q, k, v = self.in_proj_container(query, key, value) assert q.size(-1 ) % self.nhead == 0, "query's embed_dim must be divisible by the number of heads" head_dim = q.size(-1) // self.nhead q = q.reshape(tgt_len, bsz * self.nhead, head_dim) assert k.size(-1 ) % self.nhead == 0, "key's embed_dim must be divisible by the number of heads" head_dim = k.size(-1) // self.nhead k = k.reshape(src_len, bsz * self.nhead, head_dim) assert v.size(-1 ) % self.nhead == 0, "value's embed_dim must be divisible by the number of heads" head_dim = v.size(-1) // self.nhead v = v.reshape(src_len, bsz * self.nhead, head_dim) attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v) attn_output = attn_output.reshape(tgt_len, bsz, embed_dim) attn_output = self.out_proj(attn_output) if self.batch_first: attn_output = attn_output.transpose(-3, -2) return attn_output, attn_output_weights class ScaledDotProduct(torch.nn.Module): def __init__(self, dropout=0.0, batch_first=False): """Processes a projected query and key-value pair to apply scaled dot product attention. Args: dropout (float): probability of dropping an attention weight. batch_first: If ``True``, then the input and output tensors are provided as `(batch, seq, feature)`. Default: ``False`` Examples:: >>> import torch, torchtext >>> SDP = torchtext.nn.ScaledDotProduct(dropout=0.1) >>> q = torch.randn(21, 256, 3) >>> k = v = torch.randn(21, 256, 3) >>> attn_output, attn_weights = SDP(q, k, v) >>> print(attn_output.shape, attn_weights.shape) torch.Size([21, 256, 3]) torch.Size([256, 21, 21]) """ super(ScaledDotProduct, self).__init__() self.dropout = dropout self.batch_first = batch_first def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', attn_mask: 'Optional[torch.Tensor]'=None, bias_k: 'Optional[torch.Tensor]'=None, bias_v: 'Optional[torch.Tensor]'=None ) ->Tuple[torch.Tensor, torch.Tensor]: """Uses a scaled dot product with the projected key-value pair to update the projected query. Args: query (Tensor): Projected query key (Tensor): Projected key value (Tensor): Projected value attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are used for incremental decoding. Users should also provide ``bias_k``. Shape: - query: :math:`(..., L, N * H, E / H)` - key: :math:`(..., S, N * H, E / H)` - value: :math:`(..., S, N * H, E / H)` - attn_mask: :math:`(N * H, L, S)`, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged. - bias_k and bias_v: :math:`(1, N * H, E / H)` - Output: :math:`(..., L, N * H, E / H)`, :math:`(N * H, L, S)` Note: It's optional to have the query/key/value inputs with more than three dimensions (for broadcast purpose). The ScaledDotProduct module will operate on the last three dimensions.
where L is the target length, S is the source length, H is the number of attention heads, N is the batch size, and E is the embedding dimension. """ if self.batch_first: query, key, value = query.transpose(-3, -2), key.transpose(-3, -2 ), value.transpose(-3, -2) if bias_k is not None and bias_v is not None: assert key.size(-1) == bias_k.size(-1) and key.size(-2 ) == bias_k.size(-2) and bias_k.size(-3 ) == 1, 'Shape of bias_k is not supported' assert value.size(-1) == bias_v.size(-1) and value.size(-2 ) == bias_v.size(-2) and bias_v.size(-3 ) == 1, 'Shape of bias_v is not supported' key = torch.cat([key, bias_k]) value = torch.cat([value, bias_v]) if attn_mask is not None: attn_mask = torch.nn.functional.pad(attn_mask, (0, 1)) tgt_len, head_dim = query.size(-3), query.size(-1) assert query.size(-1) == key.size(-1) == value.size(-1 ), 'The feature dim of query, key, value must be equal.' assert key.size() == value.size(), 'Shape of key, value must match' src_len = key.size(-3) batch_heads = max(query.size(-2), key.size(-2)) query, key, value = query.transpose(-2, -3), key.transpose(-2, -3 ), value.transpose(-2, -3) query = query * float(head_dim) ** -0.5 if attn_mask is not None: if attn_mask.dim() != 3: raise RuntimeError('attn_mask must be a 3D tensor.') if attn_mask.size(-1) != src_len or attn_mask.size(-2 ) != tgt_len or attn_mask.size(-3) != 1 and attn_mask.size(-3 ) != batch_heads: raise RuntimeError('The size of the attn_mask is not correct.') if attn_mask.dtype != torch.bool: raise RuntimeError( 'Only bool tensor is supported for attn_mask') attn_output_weights = torch.matmul(query, key.transpose(-2, -1)) if attn_mask is not None: attn_output_weights.masked_fill_(attn_mask, -100000000.0) attn_output_weights = torch.nn.functional.softmax(attn_output_weights, dim=-1) attn_output_weights = torch.nn.functional.dropout(attn_output_weights, p=self.dropout, training=self.training) attn_output = torch.matmul(attn_output_weights, value) if self.batch_first: return attn_output, attn_output_weights else: return attn_output.transpose(-3, -2), attn_output_weights class TransformerEncoderLayerNew(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='gelu'): super(TransformerEncoderLayerNew, self).__init__() in_proj_container = InProjContainer(Linear(d_model, d_model), Linear(d_model, d_model), Linear(d_model, d_model)) self.mha = MultiheadAttentionContainer(nhead, in_proj_container, ScaledDotProduct(), Linear(d_model, d_model)) self.linear1 = Linear(d_model, dim_feedforward) self.dropout = Dropout(dropout) self.linear2 = Linear(dim_feedforward, d_model) self.norm1 = LayerNorm(d_model) self.norm2 = LayerNorm(d_model) self.dropout1 = Dropout(dropout) self.dropout2 = Dropout(dropout) if activation == 'relu': self.activation = F.relu elif activation == 'gelu': self.activation = F.gelu else: raise RuntimeError('only relu/gelu are supported, not {}'. 
format(activation)) def init_weights(self): self.mha.in_proj_container.query_proj.init_weights() self.mha.in_proj_container.key_proj.init_weights() self.mha.in_proj_container.value_proj.init_weights() self.mha.out_proj.init_weights() self.linear1.weight.data.normal_(mean=0.0, std=0.02) self.linear2.weight.data.normal_(mean=0.0, std=0.02) self.norm1.bias.data.zero_() self.norm1.weight.data.fill_(1.0) self.norm2.bias.data.zero_() self.norm2.weight.data.fill_(1.0) def forward(self, input_0): primals_2 = self.mha.in_proj_container.query_proj.weight primals_3 = self.mha.in_proj_container.query_proj.bias primals_4 = self.mha.in_proj_container.key_proj.weight primals_5 = self.mha.in_proj_container.key_proj.bias primals_6 = self.mha.in_proj_container.value_proj.weight primals_7 = self.mha.in_proj_container.value_proj.bias primals_8 = self.mha.out_proj.weight primals_9 = self.mha.out_proj.bias primals_12 = self.linear1.weight primals_13 = self.linear1.bias primals_14 = self.linear2.weight primals_10 = self.linear2.bias primals_11 = self.norm1.weight primals_15 = self.norm1.bias primals_16 = self.norm2.weight primals_17 = self.norm2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
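The bias_k/bias_v branch of ScaledDotProduct.forward above appends one extra step to the key/value sequences and widens the attention mask to match. A minimal sketch of that bookkeeping, with purely illustrative sizes (S, L, NH, D are not taken from this record):

import torch

S, L, NH, D = 6, 5, 8, 4                      # src len, tgt len, batch * heads, head dim
key = torch.randn(S, NH, D)
value = torch.randn(S, NH, D)
bias_k = torch.randn(1, NH, D)                # a single extra key step, shape (1, N * H, E / H)
bias_v = torch.randn(1, NH, D)
attn_mask = torch.zeros(NH, L, S, dtype=torch.bool)

key = torch.cat([key, bias_k])                # sequence dim (dim=-3 for 3-D inputs) grows by one
value = torch.cat([value, bias_v])
attn_mask = torch.nn.functional.pad(attn_mask, (0, 1))   # pad one extra source column
assert key.size(0) == value.size(0) == attn_mask.size(-1) == S + 1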
Hirni-Meshram3/text
TransformerEncoderLayer
false
5330
[ "BSD-3-Clause" ]
1
84e6c7bd99c7fb3c229ff289aa722149e3136094
https://github.com/Hirni-Meshram3/text/tree/84e6c7bd99c7fb3c229ff289aa722149e3136094
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear
from torch.nn import Dropout
from torch.nn import LayerNorm
from typing import Optional
import torch.utils.data
from typing import Tuple


class InProjContainer(torch.nn.Module):

    def __init__(self, query_proj, key_proj, value_proj):
        """An in-proj container to project query/key/value in MultiheadAttention.
        This module happens before reshaping the projected query/key/value into multiple heads.
        See the linear layers (bottom) of Multi-head Attention in Fig 2 of the Attention Is All You Need paper.
        Also check the usage example in torchtext.nn.MultiheadAttentionContainer.

        Args:
            query_proj: a proj layer for query. A typical projection layer is torch.nn.Linear.
            key_proj: a proj layer for key. A typical projection layer is torch.nn.Linear.
            value_proj: a proj layer for value. A typical projection layer is torch.nn.Linear.
        """
        super().__init__()
        self.query_proj = query_proj
        self.key_proj = key_proj
        self.value_proj = value_proj

    def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Projects the input sequences using in-proj layers. query/key/value are simply passed to
        the forward function of query/key/value_proj, respectively.

        Args:
            query (Tensor): The query to be projected.
            key (Tensor): The keys to be projected.
            value (Tensor): The values to be projected.

        Examples::
            >>> import torch
            >>> from torchtext.nn import InProjContainer
            >>> embed_dim, bsz = 10, 64
            >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim),
                                                    torch.nn.Linear(embed_dim, embed_dim),
                                                    torch.nn.Linear(embed_dim, embed_dim))
            >>> q = torch.rand((5, bsz, embed_dim))
            >>> k = v = torch.rand((6, bsz, embed_dim))
            >>> q, k, v = in_proj_container(q, k, v)
        """
        return self.query_proj(query), self.key_proj(key), self.value_proj(value)


class MultiheadAttentionContainer(torch.nn.Module):

    def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False):
        """ A multi-head attention container

        Args:
            nhead: the number of heads in the multiheadattention model
            in_proj_container: A container of multi-head in-projection linear layers (a.k.a. nn.Linear).
            attention_layer: The custom attention layer. The input sent from MHA container to the attention layer
                is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value
                while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`.
                The attention_layer needs to support broadcast if users want the overall
                MultiheadAttentionContainer with broadcast.
            out_proj: The multi-head out-projection layer (a.k.a. nn.Linear).
            batch_first: If ``True``, then the input and output tensors are provided
                as `(..., N, L, E)`. Default: ``False``

        Examples::
            >>> import torch
            >>> from torchtext.nn import MultiheadAttentionContainer, InProjContainer, ScaledDotProduct
            >>> embed_dim, num_heads, bsz = 10, 5, 64
            >>> in_proj_container = InProjContainer(torch.nn.Linear(embed_dim, embed_dim),
                                                    torch.nn.Linear(embed_dim, embed_dim),
                                                    torch.nn.Linear(embed_dim, embed_dim))
            >>> MHA = MultiheadAttentionContainer(num_heads, # ... truncated (>4000 chars) for memory efficiency
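The Examples block above is cut off at the truncation marker; as a stand-in, here is a minimal sketch of how the classes in this record compose, following the shapes given in the docstrings (all sizes are illustrative):

import torch
from torch.nn import Linear

embed_dim, num_heads, bsz = 10, 5, 64
in_proj_container = InProjContainer(Linear(embed_dim, embed_dim),
                                    Linear(embed_dim, embed_dim),
                                    Linear(embed_dim, embed_dim))
MHA = MultiheadAttentionContainer(num_heads, in_proj_container,
                                  ScaledDotProduct(), Linear(embed_dim, embed_dim))
query = torch.rand(21, bsz, embed_dim)        # (L, N, E)
key = value = torch.rand(16, bsz, embed_dim)  # (S, N, E)
attn_output, attn_weights = MHA(query, key, value)
# attn_output: (21, 64, 10); attn_weights: (N * H, L, S) = (320, 21, 16)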
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_qk => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask) tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ss/csssqqlybvm2dlovzxhxorvbhyaj46oqebdryly2s5obgs7rshwq.py # Topologically Sorted Source Nodes: [mul, scaled_attention_logits_1, attention_weights], Original ATen: [aten.mul, aten.add, aten._softmax] # Source node to ATen node mapping: # attention_weights => amax, div_1, exp, sub, sum_1 # mul => mul # scaled_attention_logits_1 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_10, -1000000000.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %mul), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_add_mul_1 = async_compile.triton('triton_per_fused__softmax_add_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), 
xmask, other=0.0) tmp2 = -1000000000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float("-inf")) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # output_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = (yindex // 16) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ok/cokamvfj3z4xuz3jmalftfns3huimimr3c4gzm52vaybmdliglu4.py # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] # Source node to ATen node mapping: # output_1 => add_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_12), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime 
import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone] stream0 = get_raw_stream(0) 
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_qk], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, scaled_attention_logits_1, attention_weights], Original ATen: [aten.mul, aten.add, aten._softmax] triton_per_fused__softmax_add_mul_1.run(buf5, primals_10, buf8, 256, 16, grid=grid(256), stream=stream0) del buf5 del primals_10 buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf13, primals_12, 256, grid=grid(256), stream=stream0) del primals_12 return (buf13, buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = 
rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 16, 16), (1024, 256, 16, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F def scaled_dot_product_attention(q, k, v, mask): """ q: query = (..., seq_len_q, depth) k: key = (..., seq_len_k, depth) v: value = (..., seq_len_v, depth_v) mask: float tensor with shape broadcastable to (..., seq_len_q, seq_len_k) must have seq_len_k == seq_len_v Returns: output, attention_weights """ matmul_qk = torch.matmul(q, torch.transpose(k, -1, -2)) dk = torch.tensor(k.shape[-1], dtype=torch.float32) scaled_attention_logits = matmul_qk / torch.sqrt(dk) if mask is not None: scaled_attention_logits += mask * -1000000000.0 attention_weights = F.softmax(scaled_attention_logits, dim=-1) output = torch.matmul(attention_weights, v) return output, attention_weights class MultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.linear = nn.Linear(d_model, d_model) def split_heads(self, x, batch_size): """ Split the last dimension into (num_heads, depth). Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth) x : (batch_size, seq_len, d_model) batch_size : int Returns: x : (batch_size, num_heads, seq_len, depth) """ x = x.view(batch_size, -1, self.num_heads, self.depth) return x.permute(0, 2, 1, 3) def forward(self, v, k, q, mask): batch_size = q.size()[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) q = self.split_heads(q, batch_size) k = self.split_heads(k, batch_size) v = self.split_heads(v, batch_size) scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask) scaled_attention = scaled_attention.permute(0, 2, 1, 3) concat_attention = scaled_attention.reshape(batch_size, -1, self. d_model) output = self.linear(concat_attention) return output, attention_weights def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 16, 16])] def get_init_inputs(): return [[], {'d_model': 4, 'num_heads': 4}]
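A brief usage sketch of the module above. Note the additive mask convention: positions set to 1.0 receive a -1e9 penalty before the softmax. Sizes are illustrative:

import torch

mha = MultiHeadAttention(d_model=4, num_heads=4)
q = k = v = torch.rand(4, 16, 4)              # (batch, seq_len, d_model)
mask = torch.zeros(4, 1, 16, 16)              # broadcasts over the heads dim; 1.0 = blocked
mask[..., 8:] = 1.0                           # e.g. hide the last 8 key positions
output, attention_weights = mha(v, k, q, mask)
# output: (4, 16, 4); attention_weights: (4, 4, 16, 16)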
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused__softmax_add_mul_1(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = -1000000000.0 tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, float('-inf')) tmp8 = triton_helpers.max2(tmp7, 1)[:, None] tmp9 = tmp4 - tmp8 tmp10 = tl_math.exp(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tmp15 = tmp10 / tmp14 tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_add_mul_1[grid(256)](buf5, primals_10, buf8, 256, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf5 del primals_10 buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), 
reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12) buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0) del buf12 triton_poi_fused_add_3[grid(256)](buf13, primals_12, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_12 return buf13, buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0) def scaled_dot_product_attention(q, k, v, mask): """ q: query = (..., seq_len_q, depth) k: key = (..., seq_len_k, depth) v: value = (..., seq_len_v, depth_v) mask: float tensor with shape broadcastable to (..., seq_len_q, seq_len_k) must have seq_len_k == seq_len_v Returns: output, attention_weights """ matmul_qk = torch.matmul(q, torch.transpose(k, -1, -2)) dk = torch.tensor(k.shape[-1], dtype=torch.float32) scaled_attention_logits = matmul_qk / torch.sqrt(dk) if mask is not None: scaled_attention_logits += mask * -1000000000.0 attention_weights = F.softmax(scaled_attention_logits, dim=-1) output = torch.matmul(attention_weights, v) return output, attention_weights class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttentionNew, self).__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.linear = nn.Linear(d_model, d_model) def split_heads(self, x, batch_size): """ Split the last dimension into (num_heads, depth). Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth) x : (batch_size, seq_len, d_model) batch_size : int Returns: x : (batch_size, num_heads, seq_len, depth) """ x = x.view(batch_size, -1, self.num_heads, self.depth) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.wq.weight primals_3 = self.wq.bias primals_4 = self.wk.weight primals_5 = self.wk.bias primals_7 = self.wv.weight primals_8 = self.wv.bias primals_11 = self.linear.weight primals_12 = self.linear.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
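A minimal parity sketch comparing the compiled module against the eager MultiHeadAttention reference above. Assumptions: a CUDA device is available (the generated `call` launches CUDA kernels), and q = k = v is used so the check does not depend on argument ordering:

import torch

eager = MultiHeadAttention(d_model=4, num_heads=4).cuda()
fused = MultiHeadAttentionNew(d_model=4, num_heads=4).cuda()
fused.load_state_dict(eager.state_dict())        # identical projection weights

x = torch.rand(4, 4, 4, 4, device='cuda')        # q = k = v
mask = torch.zeros(4, 4, 16, 16, device='cuda')  # shapes fixed by assert_size_stride in call()
out_ref, w_ref = eager(x, x, x, mask)
out_new, w_new = fused(x, x, x, mask)
assert torch.allclose(out_ref, out_new, atol=1e-5)
assert torch.allclose(w_ref, w_new, atol=1e-5)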
IanYHWu/tied-representation-learning
MultiHeadAttention
false
5331
[ "MIT" ]
1
bda9814dc40cf552f7bdd2ade78f5e2958a7ea83
https://github.com/IanYHWu/tied-representation-learning/tree/bda9814dc40cf552f7bdd2ade78f5e2958a7ea83
import torch import torch.nn as nn import torch.nn.functional as F def scaled_dot_product_attention(q, k, v, mask): """ q: query = (..., seq_len_q, depth) k: key = (..., seq_len_k, depth) v: value = (..., seq_len_v, depth_v) mask: float tensor with shape broadcastable to (..., seq_len_q, seq_len_k) must have seq_len_k == seq_len_v Returns: output, attention_weights """ matmul_qk = torch.matmul(q, torch.transpose(k, -1, -2)) dk = torch.tensor(k.shape[-1], dtype=torch.float32) scaled_attention_logits = matmul_qk / torch.sqrt(dk) if mask is not None: scaled_attention_logits += mask * -1000000000.0 attention_weights = F.softmax(scaled_attention_logits, dim=-1) output = torch.matmul(attention_weights, v) return output, attention_weights class Model(nn.Module): def __init__(self, d_model, num_heads): super().__init__() self.num_heads = num_heads self.d_model = d_model assert d_model % self.num_heads == 0 self.depth = d_model // self.num_heads self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.linear = nn.Linear(d_model, d_model) def split_heads(self, x, batch_size): """ Split the last dimension into (num_heads, depth). Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth) x : (batch_size, seq_len, d_model) batch_size : int Returns: x : (batch_size, num_heads, seq_len, depth) """ x = x.view(batch_size, -1, self.num_heads, self.depth) return x.permute(0, 2, 1, 3) def forward(self, v, k, q, mask): batch_size = q.size()[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) q = self.split_heads(q, batch_size) k = self.split_heads(k, batch_size) v = self.split_heads(v, batch_size) scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask) scaled_attention = scaled_attention.permute(0, 2, 1, 3) concat_attention = scaled_attention.reshape(batch_size, -1, self. d_model) output = self.linear(concat_attention) return output, attention_weights def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 16, 16])] def get_init_inputs(): return [4, 4]
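The two helpers above form the dataset's test harness; a short illustrative run (note that the random mask from get_inputs acts as a soft additive penalty rather than a hard boolean mask):

model = Model(*get_init_inputs())             # Model(4, 4)
v, k, q, mask = get_inputs()
output, attention_weights = model(v, k, q, mask)
# output: torch.Size([4, 16, 4]); attention_weights: torch.Size([4, 4, 16, 16])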
RelPositionMultiHeadedAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/st/cstdkeotatufdikalo45nsknyfku4ofvhlziami6ybawevy72lal.py # Topologically Sorted Source Nodes: [matrix_ac, matrix_bd], Original ATen: [aten.clone] # Source node to ATen node mapping: # matrix_ac => clone # matrix_bd => clone_2 # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : 
tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (x2 + (4*y3)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mh/cmhet4vfl4jlxtge4zzaaa2nugvxpr5f4ge7rs72qwe2aow7vxwy.py # Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.clone] # Source node to ATen node mapping: # matrix_ac => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py 
# Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.clone] # Source node to ATen node mapping: # matrix_bd => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u3/cu3opkmzrgckmrk64elyf7ml2a2gq2grpcbtiejtq54tloftwrmr.py # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq] # Source node to ATen node mapping: # mask => eq # Graph fragment: # %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {}) triton_poi_fused_eq_3 = async_compile.triton('triton_poi_fused_eq_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/el/celmnq4cmthcneo2qgxp66qy3keynodxqjqu5pn2ewxs4o4pk4pv.py # Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # add_2 => add_2 # scores => div # scores_1 => full_default, where # softmax => amax, exp, sub, sum_1 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %view_17), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_div_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_add_div_masked_fill_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_masked_fill_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (4*x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = float("-inf") tmp7 = tl.where(tmp0, tmp6, tmp5) tmp11 = tmp9 + tmp10 tmp12 = tmp11 * tmp4 tmp13 = tl.where(tmp8, tmp6, tmp12) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp16 + tmp17 tmp19 = tmp18 * tmp4 tmp20 = tl.where(tmp15, tmp6, tmp19) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp23 + tmp24 tmp26 = tmp25 * tmp4 tmp27 = tl.where(tmp22, tmp6, tmp26) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + (x3), tmp28, xmask) tl.store(out_ptr1 + (x3), tmp39, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fn/cfnmi7pncqelvz56byxmokejmd37qckxv3wfecynqkl3e6gmjuq3.py # Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax, attn], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax] # Source node to ATen node mapping: # add_2 => add_2 # attn => full_default_1, where_1 # scores => div # scores_1 => full_default, where # softmax => div_1, exp, sub # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %view_17), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1.0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, 
%div), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {}) triton_poi_fused__softmax_add_div_masked_fill_5 = async_compile.triton('triton_poi_fused__softmax_add_div_masked_fill_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_masked_fill_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 64) x4 = xindex % 16 x5 = xindex x6 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + (x5), xmask) tmp2 = tl.load(in_ptr1 + (x5), xmask) tmp8 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x6), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = float("-inf") tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp13 = 0.0 tmp14 = tl.where(tmp0, tmp13, tmp12) tl.store(in_out_ptr0 + (x5), tmp12, xmask) tl.store(out_ptr0 + (x5), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 
1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3) del primals_11 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matrix_ac, matrix_bd], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, primals_12, primals_13, buf4, buf7, 16, 4, grid=grid(16, 4), stream=stream0) del primals_12 del primals_13 del primals_3 buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matrix_ac], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf8, 16, 4, grid=grid(16, 4), stream=stream0) buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matrix_bd], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mask], Original ATen: [aten.eq] triton_poi_fused_eq_3.run(primals_14, buf10, 64, grid=grid(64), stream=stream0) del primals_14 
buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf3 # reuse buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_add_div_masked_fill_4.run(buf10, buf6, buf9, buf11, buf12, 64, grid=grid(64), stream=stream0) buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_2, scores, scores_1, softmax, attn], Original ATen: [aten.add, aten.div, aten.masked_fill, aten._softmax] triton_poi_fused__softmax_add_div_masked_fill_5.run(buf13, buf10, buf9, buf11, buf12, buf14, 256, grid=grid(256), stream=stream0) del buf9 buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, primals_8, buf15, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16) buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf16, buf17, 16, 4, grid=grid(16, 4), stream=stream0) buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18) del primals_16 return (reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 4), (16, 
4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
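For reference, a minimal eager-PyTorch sketch (illustrative, not part of the generated module) of what the two fused kernels above compute together, following their "Graph fragment" comments: kernel 4 produces the per-row maximum and sum-of-exponentials of the masked, scaled scores, and kernel 5 normalizes and re-applies the mask as zeros, keeping the unmasked softmax as well for the backward pass. The helper name below is made up for illustration.

import torch

def masked_rel_softmax_reference(matrix_ac, matrix_bd, mask):
    # scores = (matrix_ac + matrix_bd) / sqrt(d_k); the kernels multiply by
    # 1.0 because d_k == 1 in the traced configuration
    scores = (matrix_ac + matrix_bd).masked_fill(mask, float('-inf'))
    attn = torch.softmax(scores, dim=-1)  # kernel 4: row max and sum(exp); kernel 5: exp/sum
    return attn.masked_fill(mask, 0.0)    # kernel 5's second where(mask, 0.0, attn)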
import math import torch from typing import Optional from typing import Tuple from torch import nn class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct a MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) class RelPositionMultiHeadedAttention(MultiHeadedAttention): """Multi-Head Attention layer with relative position encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate.
""" def __init__(self, n_head, n_feat, dropout_rate): """Construct an RelPositionMultiHeadedAttention object.""" super().__init__(n_head, n_feat, dropout_rate) self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x, zero_triu: 'bool'=False): """Compute relative positinal encoding. Args: x (torch.Tensor): Input tensor (batch, time, size). zero_triu (bool): If true, return the lower triangular part of the matrix. Returns: torch.Tensor: Output tensor. """ zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x .size(2)) x = x_padded[:, :, 1:].view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', pos_emb: 'torch.Tensor', mask: 'Optional[torch.Tensor]' ): """Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). pos_emb (torch.Tensor): Positional embedding tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) q = q.transpose(1, 2) n_batch_pos = pos_emb.size(0) p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) p = p.transpose(1, 2) q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) scores = (matrix_ac + matrix_bd) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_head': 4, 'n_feat': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from typing import Optional from typing import Tuple from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (x2 + 4 * y3), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_eq_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 4 * x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last').to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last').to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr2 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = float('-inf') tmp7 = tl.where(tmp0, tmp6, tmp5) tmp11 = tmp9 + tmp10 tmp12 = tmp11 * tmp4 tmp13 = tl.where(tmp8, tmp6, tmp12) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp16 + tmp17 tmp19 = tmp18 * tmp4 tmp20 = tl.where(tmp15, tmp6, tmp19) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp23 + tmp24 tmp26 = tmp25 * tmp4 tmp27 = tl.where(tmp22, tmp6, tmp26) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tl.store(out_ptr0 + x3, tmp28, xmask) tl.store(out_ptr1 + x3, tmp39, xmask) @triton.jit def triton_poi_fused__softmax_add_div_masked_fill_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 64 x4 = xindex % 16 x5 = xindex x6 = xindex // 4 tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp1 = tl.load(in_out_ptr0 + x5, xmask) tmp2 = tl.load(in_ptr1 + x5, xmask) tmp8 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x6, xmask, eviction_policy='evict_last') tmp3 
= tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = float('-inf') tmp7 = tl.where(tmp0, tmp6, tmp5) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp13 = 0.0 tmp14 = tl.where(tmp0, tmp13, tmp12) tl.store(in_out_ptr0 + x5, tmp12, xmask) tl.store(out_ptr0 + x5, tmp14, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4, 1), (1, 1)) assert_size_stride(primals_13, (4, 1), (1, 1)) assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf3) del primals_11 buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, primals_12, primals_13, buf4, buf7, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_12 del primals_13 del primals_3 buf5 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf8 = reinterpret_tensor(buf1, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf1 triton_poi_fused_clone_2[grid(16, 4)](buf3, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf8, (16, 1, 4), (4, 0, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool) triton_poi_fused_eq_3[grid(64)](primals_14, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf11 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf3 buf12 = empty_strided_cuda((4, 4, 
4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_div_masked_fill_4[grid(64)](buf10, buf6, buf9, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_div_masked_fill_5[grid(256)](buf13, buf10, buf9, buf11, buf12, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf9 buf15 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf12 triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, buf15, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf16 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf14, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 1), (4, 1, 0), 0), out=buf16) buf17 = reinterpret_tensor(buf11, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf11 triton_poi_fused_clone_2[grid(16, 4)](buf16, buf17, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf16, (16, 4), (4, 1), 0) del buf16 extern_kernels.addmm(primals_16, reinterpret_tensor(buf17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf18) del primals_16 return reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), buf10, buf13, reinterpret_tensor(buf17, (16, 4), (4, 1), 0 ), primals_15, reinterpret_tensor(buf14, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf15, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf7, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0) class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct a MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k).
""" n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) class RelPositionMultiHeadedAttentionNew(MultiHeadedAttention): """Multi-Head Attention layer with relative position encoding. Paper: https://arxiv.org/abs/1901.02860 Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head, n_feat, dropout_rate): """Construct an RelPositionMultiHeadedAttention object.""" super().__init__(n_head, n_feat, dropout_rate) self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) torch.nn.init.xavier_uniform_(self.pos_bias_u) torch.nn.init.xavier_uniform_(self.pos_bias_v) def rel_shift(self, x, zero_triu: 'bool'=False): """Compute relative positinal encoding. Args: x (torch.Tensor): Input tensor (batch, time, size). zero_triu (bool): If true, return the lower triangular part of the matrix. Returns: torch.Tensor: Output tensor. 
""" zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=-1) x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x .size(2)) x = x_padded[:, :, 1:].view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, input_0, input_1, input_2, input_3, input_4): primals_12 = self.pos_bias_u primals_13 = self.pos_bias_v primals_2 = self.linear_q.weight primals_3 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_11 = self.linear_out.weight primals_16 = self.linear_out.bias primals_15 = self.linear_pos.weight primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 primals_14 = input_4 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0]
Honghe/wenet
RelPositionMultiHeadedAttention
false
5,332
[ "Apache-2.0" ]
1
4421790bec3778df591816d69f0449930a9be321
https://github.com/Honghe/wenet/tree/4421790bec3778df591816d69f0449930a9be321
import math import torch from typing import Optional from typing import Tuple from torch import nn class MultiHeadedAttention(nn.Module): """Multi-Head Attention layer. Args: n_head (int): The number of heads. n_feat (int): The number of features. dropout_rate (float): Dropout rate. """ def __init__(self, n_head: 'int', n_feat: 'int', dropout_rate: 'float'): """Construct a MultiHeadedAttention object.""" super().__init__() assert n_feat % n_head == 0 self.d_k = n_feat // n_head self.h = n_head self.linear_q = nn.Linear(n_feat, n_feat) self.linear_k = nn.Linear(n_feat, n_feat) self.linear_v = nn.Linear(n_feat, n_feat) self.linear_out = nn.Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) def forward_qkv(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Transform query, key and value. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). Returns: torch.Tensor: Transformed query tensor, size (#batch, n_head, time1, d_k). torch.Tensor: Transformed key tensor, size (#batch, n_head, time2, d_k). torch.Tensor: Transformed value tensor, size (#batch, n_head, time2, d_k). """ n_batch = query.size(0) q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) return q, k, v def forward_attention(self, value: 'torch.Tensor', scores: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute attention context vector. Args: value (torch.Tensor): Transformed value, size (#batch, n_head, time2, d_k). scores (torch.Tensor): Attention score, size (#batch, n_head, time1, time2). mask (torch.Tensor): Mask, size (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Transformed value (#batch, time1, d_model) weighted by the attention score (#batch, time1, time2). """ n_batch = value.size(0) if mask is not None: mask = mask.unsqueeze(1).eq(0) scores = scores.masked_fill(mask, -float('inf')) attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0) else: attn = torch.softmax(scores, dim=-1) p_attn = self.dropout(attn) x = torch.matmul(p_attn, value) x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k) return self.linear_out(x) def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value: 'torch.Tensor', mask: 'Optional[torch.Tensor]') ->torch.Tensor: """Compute scaled dot product attention. Args: query (torch.Tensor): Query tensor (#batch, time1, size). key (torch.Tensor): Key tensor (#batch, time2, size). value (torch.Tensor): Value tensor (#batch, time2, size). mask (torch.Tensor): Mask tensor (#batch, 1, time2) or (#batch, time1, time2). Returns: torch.Tensor: Output tensor (#batch, time1, d_model). """ q, k, v = self.forward_qkv(query, key, value) scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) return self.forward_attention(v, scores, mask) class Model(MultiHeadedAttention): """Multi-Head Attention layer with rela # ... truncated (>4000 chars) for memory efficiency
ToLongTensor
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ml/cmltiy5hrv2wqhdqt3fezcfhgimkd7dolxjtsc7omtdmbpeu3exe.py # Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy] # Source node to ATen node mapping: # tensor => convert_element_type # Graph fragment: # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.float32), kwargs = {}) triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from typing import List import torch.nn as nn import torch.utils.data class ToLongTensor(nn.Module): """Convert a list of integers to a long tensor """ def __init__(self): super(ToLongTensor, self).__init__() def forward(self, tokens: 'List[List[int]]') ->Tensor: return torch.tensor(tokens) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
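A quick check (assumed usage, not from the source repo) that the eager module does what its docstring says when fed real token ids: torch.tensor infers int64 from Python ints.

import torch

to_long = ToLongTensor()
t = to_long([[1, 2, 3], [4, 5, 6]])
print(t.dtype, t.shape)  # torch.int64 torch.Size([2, 3])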
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class ToLongTensorNew(nn.Module): """Convert a list of integers to a long tensor """ def __init__(self): super(ToLongTensorNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
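Note that the compiled wrapper above was traced with the float inputs supplied by get_inputs(), so the generated kernel is a plain fp32 copy (aten._to_copy) guarded to a (4, 4, 4, 4) CUDA tensor; it does not perform the integer cast the eager module would. A hedged usage sketch:

import torch

m = ToLongTensorNew()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = m(x)
print(y.dtype)  # torch.float32, not torch.int64: the trace specialized to the sample inputs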
Hirni-Meshram3/text
ToLongTensor
false
5,333
[ "BSD-3-Clause" ]
1
84e6c7bd99c7fb3c229ff289aa722149e3136094
https://github.com/Hirni-Meshram3/text/tree/84e6c7bd99c7fb3c229ff289aa722149e3136094
import torch from torch import Tensor from typing import List import torch.nn as nn import torch.utils.data class Model(nn.Module): """Convert a list of integers to a long tensor """ def __init__(self): super().__init__() def forward(self, tokens: 'List[List[int]]') ->Tensor: return torch.tensor(tokens) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
MsgNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dv/cdvohkptuslet5o4k7ir3iwxhauwqpaue5iwojpfdtbzbkod67k6.py # Topologically Sorted Source Nodes: [msg, x_norm, mul, msg_1], Original ATen: [aten.div, aten.linalg_vector_norm, aten.mul] # Source node to ATen node mapping: # msg => div # msg_1 => mul_1 # mul => mul # x_norm => pow_3, pow_4, sum_2 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_4), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg2_1), kwargs = {}) triton_poi_fused_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (0)) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp25 = tmp24 * tmp24 tmp26 = tmp23 + tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp15 * tmp27 tmp31 = tmp28 * tmp30 tl.store(in_out_ptr0 + (x3), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [msg, x_norm, mul, msg_1], Original ATen: [aten.div, aten.linalg_vector_norm, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_mul_0.run(buf1, arg0_1, arg1_1, arg2_1, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
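A minimal eager reference (illustrative names, not from the source repo) for the single fused kernel above: it folds F.normalize's 1e-12 epsilon floor (tmp13/tmp14), the L2 norm of x, and the msg_scale multiply into one pass over the output.

import torch

def fused_msgnorm_reference(x, msg, msg_scale):
    # matches triton_poi_fused_div_linalg_vector_norm_mul_0
    msg_hat = msg / msg.norm(p=2, dim=1, keepdim=True).clamp_min(1e-12)  # F.normalize
    return msg_hat * x.norm(p=2, dim=1, keepdim=True) * msg_scale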
import torch import torch.nn.functional as F class MsgNorm(torch.nn.Module): def __init__(self, learn_msg_scale=False): super(MsgNorm, self).__init__() self.msg_scale = torch.nn.Parameter(torch.Tensor([1.0]), requires_grad=learn_msg_scale) def forward(self, x, msg, p=2): msg = F.normalize(msg, p=p, dim=1) x_norm = x.norm(p=p, dim=1, keepdim=True) msg = msg * x_norm * self.msg_scale return msg def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
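A small worked example (assumed usage): the output keeps msg's direction but inherits x's per-position L2 magnitude, since msg_scale is initialized to 1.0.

import torch

norm = MsgNorm(learn_msg_scale=False)
x, msg = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
out = norm(x, msg)
# the norm of the output over dim=1 equals the norm of x over dim=1
print(torch.allclose(out.norm(p=2, dim=1), x.norm(p=2, dim=1), atol=1e-5))  # True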
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_linalg_vector_norm_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr2 + 0) tmp30 = tl.broadcast_to(tmp29, [XBLOCK]) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp16 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp25 = tmp24 * tmp24 tmp26 = tmp23 + tmp25 tmp27 = libdevice.sqrt(tmp26) tmp28 = tmp15 * tmp27 tmp31 = tmp28 * tmp30 tl.store(in_out_ptr0 + x3, tmp31, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_mul_0[grid(256)](buf1, arg0_1, arg1_1, arg2_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class MsgNormNew(torch.nn.Module): def __init__(self, learn_msg_scale=False): super(MsgNormNew, self).__init__() self.msg_scale = torch.nn.Parameter(torch.Tensor([1.0]), requires_grad=learn_msg_scale) def forward(self, input_0, input_1): arg2_1 = self.msg_scale arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
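A hedged parity sketch pairing the compiled module with the eager MsgNorm defined just above in this entry; it requires CUDA since call() targets device 0.

import torch

eager, compiled = MsgNorm().cuda(), MsgNormNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
msg = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(eager(x, msg), compiled(x, msg), atol=1e-6))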
Hermine2015/deep_gcns_torch
MsgNorm
false
5,334
[ "MIT" ]
1
69524a2a5de2ba4c3adb0fea0a090b3e9b4510d4
https://github.com/Hermine2015/deep_gcns_torch/tree/69524a2a5de2ba4c3adb0fea0a090b3e9b4510d4
import torch import torch.nn.functional as F class Model(torch.nn.Module): def __init__(self, learn_msg_scale=False): super().__init__() self.msg_scale = torch.nn.Parameter(torch.Tensor([1.0]), requires_grad=learn_msg_scale) def forward(self, x, msg, p=2): msg = F.normalize(msg, p=p, dim=1) x_norm = x.norm(p=p, dim=1, keepdim=True) msg = msg * x_norm * self.msg_scale return msg def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
make_dense
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/74/c74xlrcdmmc2trtur5rpkyhcpr6eqjx5iq3jf77lbu462fmxu3zc.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # out_1 => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %relu], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + 
(64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x3), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4s/c4sk3j6bynrnc25abteokiyffg626j5o2tqgjhdcibavx37q4342.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, buf0, buf1, 512, grid=grid(512), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf0, buf2, 256, grid=grid(256), stream=stream0) del buf0 return (buf1, primals_1, primals_2, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
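Beyond the bundled benchmark harness, the compiled graph can be driven directly. A minimal sketch, assuming a CUDA device is available; per the asserts in call(), the argument order is [conv weight, input], and the first return value is the concatenated activation:

import torch

weight = torch.randn(4, 4, 3, 3, device='cuda')
x = torch.rand(4, 4, 4, 4, device='cuda')
out, *_ = call([weight, x])      # note: call() clears the argument list it receives
assert out.shape == (4, 8, 4, 4)  # input channels (4) + growth channels (4)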
import torch import torch.nn as nn import torch.nn.functional as F class make_dense(nn.Module): def __init__(self, nChannels, growthRate, kernel_size=3): super(make_dense, self).__init__() self.conv = nn.Conv2d(nChannels, growthRate, kernel_size= kernel_size, padding=kernel_size - 1, bias=False, dilation=2) def forward(self, x): out = F.relu(self.conv(x)) out = torch.cat((x, out), 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nChannels': 4, 'growthRate': 4}]
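A minimal shape check for the eager block above (CPU suffices). With kernel_size=3, padding=2 and dilation=2 the dilated convolution preserves H and W, so torch.cat only grows the channel dimension to nChannels + growthRate:

import torch

block = make_dense(nChannels=4, growthRate=4)
x = torch.rand(4, 4, 4, 4)
y = block(x)
assert y.shape == (4, 8, 4, 4)
assert torch.equal(y[:, :4], x)  # the first nChannels channels are the input itself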
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x3, tmp14, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_2, buf0, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf0, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return buf1, primals_1, primals_2, buf2 class make_denseNew(nn.Module): def __init__(self, nChannels, growthRate, kernel_size=3): super(make_denseNew, self).__init__() self.conv = nn.Conv2d(nChannels, growthRate, kernel_size= kernel_size, padding=kernel_size - 1, bias=False, dilation=2) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
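A hypothetical parity check between the Triton-backed make_denseNew and the eager make_dense defined earlier in this record (assumes a CUDA device, since the fused call() path is CUDA-only):

import torch

eager = make_dense(4, 4).cuda()
fused = make_denseNew(4, 4).cuda()
fused.conv.weight.data.copy_(eager.conv.weight.data)  # share the single conv weight
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)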
HusterRC/FHDR
make_dense
false
5335
[ "BSD-3-Clause" ]
1
f61fea7eba3de8430fc2891afdabc77dd8e5f13f
https://github.com/HusterRC/FHDR/tree/f61fea7eba3de8430fc2891afdabc77dd8e5f13f
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, nChannels, growthRate, kernel_size=3): super().__init__() self.conv = nn.Conv2d(nChannels, growthRate, kernel_size= kernel_size, padding=kernel_size - 1, bias=False, dilation=2) def forward(self, x): out = F.relu(self.conv(x)) out = torch.cat((x, out), 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
_Full
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/g3/cg3cnzwtnnab4s4ogssyxpgoapayggisbxy4pbbkkzyy6uieeybg.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] # Source node to ATen node mapping: # x => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/o5/co5xgxhjhlo7bknaeay7cl5wcvnoj7iz2upia7zbzhelekrx6iym.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # x_1 => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_per_fused__log_softmax_1 = async_compile.triton('triton_per_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (100, 784), (784, 1)) assert_size_stride(primals_3, (100, ), (1, )) assert_size_stride(primals_4, (10, 100), (100, 1)) assert_size_stride(primals_5, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 100), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 400, grid=grid(400), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._log_softmax] triton_per_fused__log_softmax_1.run(buf2, buf5, 4, 10, grid=grid(4), stream=stream0) del buf2 return (buf5, primals_1, buf1, buf5, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((10, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
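A hypothetical driver for the compiled graph above (requires CUDA). Per the asserts in call(), the five primals are [input, _f1.weight, _f1.bias, _f2.weight, _f2.bias], and the first return value holds the log-probabilities:

import torch

dev = 'cuda'
primals = [torch.rand(4, 784, device=dev),
           torch.rand(100, 784, device=dev), torch.rand(100, device=dev),
           torch.rand(10, 100, device=dev), torch.rand(10, device=dev)]
logp, *_ = call(primals)  # call() empties the list it is given
assert logp.shape == (4, 10)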
import torch class _Full(torch.nn.Module): """ Simple, small fully connected model. """ def __init__(self): """ Model parameter constructor. """ super().__init__() self._f1 = torch.nn.Linear(28 * 28, 100) self._f2 = torch.nn.Linear(100, 10) def forward(self, x): """ Model's forward pass. Args: x Input tensor Returns: Output tensor """ x = torch.nn.functional.relu(self._f1(x.view(-1, 28 * 28))) x = torch.nn.functional.log_softmax(self._f2(x), dim=1) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
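A small sanity check of the eager model above: log_softmax emits log-probabilities, so exponentiating each output row should sum to one (CPU suffices):

import torch

model = _Full()
logp = model(torch.rand(4, 784))
assert logp.shape == (4, 10)
assert torch.allclose(logp.exp().sum(dim=1), torch.ones(4), atol=1e-5)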
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_per_fused__log_softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl_math.log(tmp10) tmp12 = tmp5 - tmp11 tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (100, 784), (784, 1)) assert_size_stride(primals_3, (100,), (1,)) assert_size_stride(primals_4, (10, 100), (100, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 100), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(400)](buf1, primals_3, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32) triton_per_fused__log_softmax_1[grid(4)](buf2, buf5, 4, 10, XBLOCK= 1, num_warps=2, num_stages=1) del buf2 return buf5, primals_1, buf1, buf5, primals_4 class _FullNew(torch.nn.Module): """ Simple, small fully connected model. """ def __init__(self): """ Model parameter constructor. """ super().__init__() self._f1 = torch.nn.Linear(28 * 28, 100) self._f2 = torch.nn.Linear(100, 10) def forward(self, input_0): primals_2 = self._f1.weight primals_3 = self._f1.bias primals_4 = self._f2.weight primals_5 = self._f2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
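A hypothetical parity sketch between the eager and Triton-backed variants (assumes a CUDA device). The two classes expose identical parameters, so a state_dict copy aligns them exactly:

import torch

eager = _Full().cuda()
fused = _FullNew().cuda()
fused.load_state_dict(eager.state_dict())  # same _f1/_f2 weights and biases
x = torch.rand(4, 784, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-5)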
IKACE/DifferentialByzantine-1
_Full
false
5336
[ "MIT" ]
1
809fd6e070fedeb87a6dbff6f883e93e3c5c8e09
https://github.com/IKACE/DifferentialByzantine-1/tree/809fd6e070fedeb87a6dbff6f883e93e3c5c8e09
import torch class Model(torch.nn.Module): """ Simple, small fully connected model. """ def __init__(self): """ Model parameter constructor. """ super().__init__() self._f1 = torch.nn.Linear(28 * 28, 100) self._f2 = torch.nn.Linear(100, 10) def forward(self, x): """ Model's forward pass. Args: x Input tensor Returns: Output tensor """ x = torch.nn.functional.relu(self._f1(x.view(-1, 28 * 28))) x = torch.nn.functional.log_softmax(self._f2(x), dim=1) return x def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return []
ConvDownsample2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sp/cspptmlazllfxjrnm4wtxf36nkf7a6dtwt5pgmfzbjzw3mpteini.py # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.convolution] # Source node to ATen node mapping: # y_7 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_2, %primals_2, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = 
tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bq/cbq7ippnsrke66kwwv3mtzwnftnasbovqh2ycx7sylieltdqxzwc.py # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # y_9 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_3, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_1 = async_compile.triton('triton_poi_fused__unsafe_index_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.75 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (3*tmp4) + (9*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cb/ccboftrxjzgggc5x4t5yuvdyezgexb5fddn4klpz7y4junftadp4.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul_4 # Graph fragment: # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.25), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints 
import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/l3/cl3cdbofywwgmthy323oolanmjbqggupaerhdksmxevl7tpckmmj.py # Topologically Sorted Source Nodes: [mul_1, x1], Original ATen: [aten.mul, aten.convolution] # Source node to ATen node mapping: # mul_1 => mul_5 # x1 => convolution_1 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 1), kwargs = {}) # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%view_4, %mul_4, %mul_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_mul_3 = async_compile.triton('triton_poi_fused_convolution_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bn/cbn3acyzz7fdxq6e7cjupa6zfqi4a2jupiyykevg3mrh354gsock.py # Topologically Sorted Source Nodes: [mul_1, x1, out, wrapped_sqrt, out_1], Original ATen: [aten.mul, aten.convolution, aten.leaky_relu, aten.sqrt, aten.leaky_relu_backward] # Source node to ATen node mapping: # mul_1 => mul_5 # out => gt, mul_6, where # out_1 => mul_7 # wrapped_sqrt => full_default # x1 => convolution_1 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 1), kwargs = {}) # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%view_4, %mul_4, %mul_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_1, %mul_6), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.414213562373095), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %full_default), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = 1.414213562373095 tmp9 = tmp7 * tmp8 tmp10 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp9, xmask) tl.store(out_ptr1 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 6, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 3, 3), (9, 9, 3, 1)) del buf0 del primals_2 buf2 = empty_strided_cuda((16, 1, 4, 4), (16, 1, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten._unsafe_index] triton_poi_fused__unsafe_index_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(primals_3, buf3, 16, grid=grid(16), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1, x1], Original ATen: [aten.mul, aten.convolution] triton_poi_fused_convolution_mul_3.run(primals_4, buf4, 4, grid=grid(4), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [mul_1, x1], Original ATen: [aten.mul, aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1)) buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [mul_1, x1, out, wrapped_sqrt, out_1], Original ATen: [aten.mul, aten.convolution, aten.leaky_relu, aten.sqrt, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4.run(buf5, buf4, buf6, buf7, 64, grid=grid(64), stream=stream0) del buf4 del buf5 return (buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
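A hypothetical driver for the compiled graph above (requires CUDA). Per the asserts in call(), the arguments are [input, FIR kernel, conv weight, conv bias]:

import torch

dev = 'cuda'
out, *_ = call([torch.rand(4, 4, 4, 4, device=dev),
                torch.rand(1, 1, 4, 4, device=dev),
                torch.rand(4, 4, 1, 1, device=dev),
                torch.rand(4, device=dev)])
assert out.shape == (4, 4, 2, 2)  # H and W halved by the stride-2 convolution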
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k class ConvDownsample2d(nn.Module): def __init__(self, kernel_size, input_channels, output_channels, k=[1, 3, 3, 1], factor=2, gain=1, use_wscale=True, lrmul=1, bias=True): """ ConvDownsample2D method in D_stylegan2. :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. :param factor: Integer downsampling factor (default: 2). :param gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` """ super().__init__() assert isinstance(factor, int ) and factor >= 1, 'factor must be larger than 1! (default: 2)' assert kernel_size >= 1 and kernel_size % 2 == 1 he_std = gain / (input_channels * output_channels * kernel_size * kernel_size) ** 0.5 self.kernel_size = kernel_size if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std) self.convH, self.convW = self.weight.shape[2:] if bias: self.bias = torch.nn.Parameter(torch.zeros(output_channels)) self.b_lrmul = lrmul else: self.bias = None self.gain = gain self.factor = factor self.k = _setup_kernel(k) * self.gain self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0) self.k = nn.Parameter(self.k, requires_grad=False) self.p = self.k.shape[-1] - self.factor + (self.convW - 1) self.padx0, self.pady0 = (self.p + 1) // 2, (self.p + 1) // 2 self.padx1, self.pady1 = self.p // 2, self.p // 2 self.kernelH, self.kernelW = self.k.shape[2:] def forward(self, x): y = x.clone() y = y.reshape([-1, x.shape[2], x.shape[3], 1]) inC, inH, inW = x.shape[1:] y = torch.reshape(y, (-1, inH, inW, 1)) y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max( self.padx0, 0), max(self.padx1, 0), 0, 0)) y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max( -self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :] y = y.permute(0, 3, 1, 2) y = y.reshape(-1, 1, inH + self.pady0 + self.pady1, inW + self. padx0 + self.padx1) y = F.conv2d(y, self.k) y = y.view(-1, 1, inH + self.pady0 + self.pady1 - self.kernelH + 1, inW + self.padx0 + self.padx1 - self.kernelW + 1) if inH != y.shape[1]: y = F.interpolate(y, size=(inH, inW)) y = y.permute(0, 2, 3, 1) y = y.reshape(-1, inC, inH, inW) x1 = F.conv2d(y, self.weight * self.w_lrmul, self.bias * self. b_lrmul, stride=self.factor, padding=self.convW // 2) out = F.leaky_relu(x1, 0.2, inplace=True) out = out * np.sqrt(2) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 1, 'input_channels': 4, 'output_channels': 4}]
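Two quick numeric checks of the eager path above (CPU suffices): _setup_kernel normalizes the separable FIR tap vector into a 2-D kernel that sums to one, and the module honors the [N, C, H // factor, W // factor] shape contract from its docstring:

import numpy as np
import torch

k = _setup_kernel([1, 3, 3, 1])
assert k.shape == (4, 4) and np.isclose(k.sum(), 1.0)  # normalized blur kernel

down = ConvDownsample2d(kernel_size=1, input_channels=4, output_channels=4)
y = down(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 2, 2)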
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.75 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 3 * tmp4 + 9 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_mul_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4( in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = 1.414213562373095 tmp9 = tmp7 * tmp8 tmp10 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp9, xmask) tl.store(out_ptr1 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1, 6, 6), (36, 6, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del 
primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (16, 1, 3, 3), (9, 9, 3, 1)) del buf0 del primals_2 buf2 = empty_strided_cuda((16, 1, 4, 4), (16, 1, 4, 1), torch.float32) triton_poi_fused__unsafe_index_1[grid(256)](buf1, buf2, 256, XBLOCK =256, num_warps=4, num_stages=1) del buf1 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_mul_2[grid(16)](primals_3, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_convolution_mul_3[grid(4)](primals_4, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf5 = extern_kernels.convolution(reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1)) buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_mul_sqrt_4[ grid(64)](buf5, buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf4 del buf5 return buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf3, buf7 def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k class ConvDownsample2dNew(nn.Module): def __init__(self, kernel_size, input_channels, output_channels, k=[1, 3, 3, 1], factor=2, gain=1, use_wscale=True, lrmul=1, bias=True): """ ConvDownsample2D method in D_stylegan2. :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. :param factor: Integer downsampling factor (default: 2). :param gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` """ super().__init__() assert isinstance(factor, int ) and factor >= 1, 'factor must be larger than 1! (default: 2)' assert kernel_size >= 1 and kernel_size % 2 == 1 he_std = gain / (input_channels * output_channels * kernel_size * kernel_size) ** 0.5 self.kernel_size = kernel_size if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std) self.convH, self.convW = self.weight.shape[2:] if bias: self.bias = torch.nn.Parameter(torch.zeros(output_channels)) self.b_lrmul = lrmul else: self.bias = None self.gain = gain self.factor = factor self.k = _setup_kernel(k) * self.gain self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0) self.k = nn.Parameter(self.k, requires_grad=False) self.p = self.k.shape[-1] - self.factor + (self.convW - 1) self.padx0, self.pady0 = (self.p + 1) // 2, (self.p + 1) // 2 self.padx1, self.pady1 = self.p // 2, self.p // 2 self.kernelH, self.kernelW = self.k.shape[2:] def forward(self, input_0): primals_3 = self.weight primals_4 = self.bias primals_2 = self.k primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
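A hypothetical parity sketch between the eager and Triton-backed downsamplers (assumes CUDA; the fused _unsafe_index kernel mirrors the default nearest-neighbor F.interpolate):

import torch

eager = ConvDownsample2d(1, 4, 4).cuda()
fused = ConvDownsample2dNew(1, 4, 4).cuda()
fused.load_state_dict(eager.state_dict())  # copies weight, bias and the FIR buffer k
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fused(x), atol=1e-4)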
Iceland-Leo/StyleGAN2_PyTorch
ConvDownsample2d
false
5337
[ "MIT" ]
1
3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
https://github.com/Iceland-Leo/StyleGAN2_PyTorch/tree/3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k class Model(nn.Module): def __init__(self, kernel_size, input_channels, output_channels, k=[1, 3, 3, 1], factor=2, gain=1, use_wscale=True, lrmul=1, bias=True): """ ConvDownsample2D method in D_stylegan2. :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. :param factor: Integer downsampling factor (default: 2). :param gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` """ super().__init__() assert isinstance(factor, int ) and factor >= 1, 'factor must be larger than 1! (default: 2)' assert kernel_size >= 1 and kernel_size % 2 == 1 he_std = gain / (input_channels * output_channels * kernel_size * kernel_size) ** 0.5 self.kernel_size = kernel_size if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std) self.convH, self.convW = self.weight.shape[2:] if bias: self.bias = torch.nn.Parameter(torch.zeros(output_channels)) self.b_lrmul = lrmul else: self.bias = None self.gain = gain self.factor = factor self.k = _setup_kernel(k) * self.gain self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0) self.k = nn.Parameter(self.k, requires_grad=False) self.p = self.k.shape[-1] - self.factor + (self.convW - 1) self.padx0, self.pady0 = (self.p + 1) // 2, (self.p + 1) // 2 self.padx1, self.pady1 = self.p // 2, self.p // 2 self.kernelH, self.kernelW = self.k.shape[2:] def forward(self, x): y = x.clone() y = y.reshape([-1, x.shape[2], x.shape[3], 1]) inC, inH, inW = x.shape[1:] y = torch.reshape(y, (-1, inH, inW, 1)) y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max( self.padx0, 0), max(self.padx1, 0), 0, 0)) y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max( -self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :] y = y.permute(0, 3, 1, 2) y = y.reshape(-1, 1, inH + self.pady0 + self.pady1, inW + self. padx0 + self.padx1) y = F.conv2d(y, self.k) y = y.view(-1, 1, inH + self.pady0 + self.pady1 - self.kernelH + 1, inW + self.padx0 + self.padx1 - self.kernelW + 1) if inH != y.shape[1]: y = F.interpolate(y, size=(inH, inW)) y = y.permute(0, 2, 3, 1) y = y.reshape(-1, inC, inH, inW) x1 = F.conv2d(y, self.weight * self.w_lrmul, self.bias * self. b_lrmul, stride=self.factor, padding=self.convW // 2) out = F.leaky_relu(x1, 0.2, inplace=True) out = out * np.sqrt(2) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [1, 4, 4]
Generator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/o4/co4zyxcl2qkqwco2hpqjmusaq55kf7f6hyk5tkf5vs6buom4p47w.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x => gt, mul, where # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lp/clpwjuezdlkcdq7xatgdbq3fr2kivvvih5oocxq5g24pjhc2q2ey.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_1 => gt_1, mul_1, where_1 # Graph fragment: # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.2), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/oe/coej3bxen3jfnunnc4dynwxrn45eswax7gc7dhzaxe55hrlkduyr.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_2 => gt_2, mul_2, where_2 # Graph fragment: # %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_5, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 
0.2), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %view_5, %mul_2), kwargs = {}) triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sb/csbbn2sf7v6eutogmmhksp5x7rwd7z3vppgfx2mbu7v3njic7bqh.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_7,), kwargs = {}) triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 
2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (8, ), (1, )) assert_size_stride(primals_6, (16, 8), (8, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (4, 16), (16, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del primals_2 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 512, grid=grid(512), stream=stream0) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 16), (1, 8), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) buf8 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_2.run(buf6, primals_7, buf7, buf8, 1024, grid=grid(1024), stream=stream0) del buf6 del primals_7 buf9 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf8, (64, 16), (16, 
1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] triton_poi_fused_tanh_3.run(buf10, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 return (buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 8), (8, 1), 0), buf7, reinterpret_tensor(buf8, (64, 16), (16, 1), 0), buf10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
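The generated module is normally driven through the benchmark harness above, but call() can also be exercised directly. A minimal sketch (an illustration, not part of the generated file; it assumes a CUDA device, and run_compiled_generator is a hypothetical helper name):

import torch

def run_compiled_generator():
    # The nine primals are the four Linear weight/bias pairs, with the input
    # tensor in third position, matching the assert_size_stride checks above.
    primals = [
        torch.randn(4, 4, device='cuda'),       # fc1.weight
        torch.randn(4, device='cuda'),          # fc1.bias
        torch.rand(4, 4, 4, 4, device='cuda'),  # network input
        torch.randn(8, 4, device='cuda'),       # fc2.weight
        torch.randn(8, device='cuda'),          # fc2.bias
        torch.randn(16, 8, device='cuda'),      # fc3.weight
        torch.randn(16, device='cuda'),         # fc3.bias
        torch.randn(4, 16, device='cuda'),      # fc4.weight
        torch.randn(4, device='cuda'),          # fc4.bias
    ]
    out = call(primals)[0]  # the first returned tensor is the tanh output
    assert out.shape == (4, 4, 4, 4)
    return out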
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Generator(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(Generator, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size * 2) self.fc3 = nn.Linear(hidden_size * 2, hidden_size * 4) self.fc4 = nn.Linear(hidden_size * 4, output_size) self.drop = nn.Dropout(p=0.3) def forward(self, x): x = F.leaky_relu(self.fc1(x), negative_slope=0.2) x = F.leaky_relu(self.fc2(x), negative_slope=0.2) x = F.leaky_relu(self.fc3(x), negative_slope=0.2) x = self.drop(x) return torch.tanh(self.fc4(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
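A minimal usage sketch for the eager module above (illustrative only): with dropout disabled via eval(), the forward pass is deterministic, and the tanh head bounds every output element to [-1, 1].

import torch

gen = Generator(input_size=4, hidden_size=4, output_size=4)
gen.eval()  # make the p=0.3 dropout a no-op
with torch.no_grad():
    out = gen(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)
assert out.abs().max() <= 1.0  # tanh output range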
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 4), (4, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (16, 8), (8, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (4, 16), (16, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 8), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.bool) buf5 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(512)](buf3, primals_5, buf4, buf5, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del primals_5 buf6 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 16), (1, 8), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool) buf8 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) triton_poi_fused_leaky_relu_2[grid(1024)](buf6, primals_7, buf7, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_7 buf9 = buf0 del buf0 extern_kernels.mm(reinterpret_tensor(buf8, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf9 triton_poi_fused_tanh_3[grid(256)](buf10, primals_9, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 return buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 8), (8, 1), 0 ), buf7, reinterpret_tensor(buf8, (64, 16), (16, 1), 0 ), buf10, primals_8, primals_6, primals_4 class GeneratorNew(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(GeneratorNew, self).__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size * 2) self.fc3 = nn.Linear(hidden_size * 2, hidden_size * 4) self.fc4 = nn.Linear(hidden_size * 4, output_size) self.drop = nn.Dropout(p=0.3) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
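Because the compiled graph above contains no dropout node, it should agree with the eager Generator in eval() mode. A minimal parity sketch (an assumption, not part of the generated file; it requires a CUDA device and the Generator class from the earlier listing):

import torch

eager = Generator(4, 4, 4).cuda().eval()
fused = GeneratorNew(4, 4, 4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # share the same weights
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x))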
Iamsdt/UdacityDeepLearningNanodegree
Generator
false
5338
[ "Apache-2.0" ]
1
507c2ce620f42e36271549471b819d3d7fceb1b6
https://github.com/Iamsdt/UdacityDeepLearningNanodegree/tree/507c2ce620f42e36271549471b819d3d7fceb1b6
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class Model(nn.Module): def __init__(self, input_size, hidden_size, output_size): super().__init__() self.fc1 = nn.Linear(input_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size * 2) self.fc3 = nn.Linear(hidden_size * 2, hidden_size * 4) self.fc4 = nn.Linear(hidden_size * 4, output_size) self.drop = nn.Dropout(p=0.3) def forward(self, x): x = F.leaky_relu(self.fc1(x), negative_slope=0.2) x = F.leaky_relu(self.fc2(x), negative_slope=0.2) x = F.leaky_relu(self.fc3(x), negative_slope=0.2) x = self.drop(x) return torch.tanh(self.fc4(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
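Note the convention difference from the earlier record: here get_init_inputs() returns plain positional arguments rather than an ([], {...}) pair. A one-line construction sketch (illustrative; the names model/output are arbitrary):

model = Model(*get_init_inputs())  # equivalent to Model(4, 4, 4)
output = model(*get_inputs())      # forward pass on a random [4, 4, 4, 4] input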
GeneratorBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wz/cwznwckxjekzcxlkjcgq2jhezeju57iivq5nhdjaezb3fvcxcau2.py # Topologically 
Sorted Source Nodes: [weights_1, pow_1, sum_1, add, sigma_inv, weights_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] # Source node to ATen node mapping: # add => add # pow_1 => pow_1 # sigma_inv => rsqrt # sum_1 => sum_1 # weights_1 => mul_2 # weights_2 => mul_3 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %unsqueeze_2), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %rsqrt), kwargs = {}) triton_per_fused_add_mul_pow_rsqrt_sum_1 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = (rindex // 9) x1 = (xindex // 4) x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + (36*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() 
tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (r5 + (36*x4)), tmp13, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mk/cmk6aftvifk5vtxtg2lvcslfabqnrroho76zhtuahfdwmzysuwiw.py # Topologically Sorted Source Nodes: [mul_4, x_3, add_2, x_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_2 => add_2 # mul_4 => mul_4 # x_3 => add_1 # x_4 => gt, mul_5, where # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_6, %select), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %mul_4), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %unsqueeze_9), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_5), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = 
tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + (x4), tmp12, xmask) tl.store(out_ptr0 + (x4), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cy/ccy6r6r4kfbnktjfnh5b72535yhbr4r6gtvhdjage2wywifwywz3.py # Topologically Sorted Source Nodes: [mul_9, x_8, add_5, x_9], Original ATen: [aten.mul, aten.add, aten.leaky_relu] # Source node to ATen node mapping: # add_5 => add_5 # mul_9 => mul_10 # x_8 => add_4 # x_9 => gt_1, mul_11, where_1 # Graph fragment: # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_16, %select_1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %mul_10), kwargs = {}) # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %unsqueeze_19), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_5, 0), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_5, %mul_11), kwargs = {}) triton_poi_fused_add_leaky_relu_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tl.store(in_out_ptr0 + (x4), tmp12, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/tx/ctxhwgwusrrp4juzika66nlcnovoipxygsfsg3yjbj3mt4mcuato.py # Topologically Sorted Source Nodes: [weights_9], Original ATen: [aten.mul] # Source node to ATen node mapping: # weights_9 => mul_14 # Graph fragment: # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_23, %unsqueeze_22), kwargs = {}) triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = (xindex // 12) x4 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lb/clbrnk753pncxxhspnazc3m5bofaqbod7plrixya7xdr3sd6jhgx.py # Topologically Sorted Source Nodes: [add_6, rgb], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # add_6 => add_6 # rgb => gt_2, mul_15, where_2 # Graph fragment: # %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %unsqueeze_26), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.2), kwargs = {}) # %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_6, %mul_15), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_2, 0), kwargs = {}) triton_poi_fused_add_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_add_leaky_relu_leaky_relu_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.2 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tmp9 = tmp8 > tmp4 tl.store(in_out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (1, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_12, (1, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_17, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [s], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(buf0, (4, 4), 
(1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0); del buf0 # reuse buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf2 # reuse buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_1, pow_1, sum_1, add, sigma_inv, weights_2], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_1.run(buf3, primals_6, buf1, buf4, 16, 36, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_5], Original ATen: [aten.mul] triton_poi_fused_mul_0.run(primals_9, buf6, 16, grid=grid(16), stream=stream0) del primals_9 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [s_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, primals_4, reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0); del buf6 # reuse buf9 = reinterpret_tensor(buf8, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0); del buf8 # reuse buf10 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_5, pow_2, sum_2, add_3, sigma_inv_1, weights_6], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt] triton_per_fused_add_mul_pow_rsqrt_sum_1.run(buf9, primals_11, buf7, buf10, 16, 36, grid=grid(16), stream=stream0) buf11 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [mul_4, x_3, add_2, x_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2.run(buf11, primals_7, primals_1, primals_8, buf20, 256, grid=grid(256), stream=stream0) del primals_7 del primals_8 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 16, 4, 4), (0, 16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf12, (1, 16, 4, 4), (256, 16, 4, 1)) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf12 # reuse # Topologically Sorted Source Nodes: [mul_9, x_8, add_5, x_9], Original ATen: [aten.mul, aten.add, aten.leaky_relu] triton_poi_fused_add_leaky_relu_mul_3.run(buf13, primals_12, primals_1, primals_13, 256, grid=grid(256), stream=stream0) del primals_12 del primals_13 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_10], Original ATen: [aten.mul] triton_poi_fused_mul_0.run(primals_14, buf14, 16, grid=grid(16), stream=stream0) del primals_14 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [style], 
Original ATen: [aten.addmm] extern_kernels.addmm(primals_15, primals_4, reinterpret_tensor(buf14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del buf14 del primals_15 buf16 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [weights_9], Original ATen: [aten.mul] triton_poi_fused_mul_4.run(primals_16, buf15, buf16, 48, grid=grid(48), stream=stream0) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf17, (1, 12, 4, 4), (192, 16, 4, 1)) buf18 = reinterpret_tensor(buf17, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf17 # reuse buf19 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add_6, rgb], Original ATen: [aten.add, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_add_leaky_relu_leaky_relu_backward_5.run(buf18, primals_17, buf19, 192, grid=grid(192), stream=stream0) del primals_17 return (buf13, buf18, primals_4, primals_6, primals_11, primals_16, reinterpret_tensor(primals_1, (4, ), (1, ), 0), buf1, buf3, reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(primals_1, (4, ), (1, ), 4), buf7, buf9, reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(buf11, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf19, buf20, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((3, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, 
repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
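Per the return statement in call(), the first two entries of the returned tuple are the two outputs of GeneratorBlock.forward: the feature map (buf13, shape [4, 4, 4, 4]) and the RGB image (buf18, shape [4, 3, 4, 4]); the remaining entries are tensors saved for the backward pass. A minimal sketch of driving call() directly (illustrative; assumes a CUDA device):

import torch
from torch._dynamo.testing import rand_strided

# (shape, stride) pairs for primals_1..primals_17, copied from the
# assert_size_stride checks in call() above.
specs = [((4, 4), (4, 1)), ((4, 4), (4, 1)), ((4,), (1,)), ((4, 4), (4, 1)),
         ((4, 4, 4, 4), (64, 16, 4, 1)), ((4, 4, 3, 3), (36, 9, 3, 1)),
         ((1,), (1,)), ((4,), (1,)), ((4, 4), (4, 1)), ((4,), (1,)),
         ((4, 4, 3, 3), (36, 9, 3, 1)), ((1,), (1,)), ((4,), (1,)),
         ((4, 4), (4, 1)), ((4,), (1,)), ((3, 4, 1, 1), (4, 1, 1, 1)),
         ((1,), (1,))]
args = [rand_strided(shape, stride, device='cuda:0', dtype=torch.float32)
        for shape, stride in specs]
outs = call(args)
x, rgb = outs[0], outs[1]
assert x.shape == (4, 4, 4, 4) and rgb.shape == (4, 3, 4, 4)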
import math import torch import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import Tuple from typing import Optional from typing import List import torch.autograd class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. $$w_i = c \\hat{w}_i$$ The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have an effect since optimizers such as Adam normalize them by a running mean of the squared gradients. The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$. But the effective weights $w$ get updated proportionately to $c \\lambda$. Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$. So we are effectively scaling the learning rate by $c$ for these weight parameters. """ def __init__(self, shape: 'List[int]'): """ * `shape` is the shape of the weight parameter """ super().__init__() self.c = 1 / math.sqrt(np.prod(shape[1:])) self.weight = nn.Parameter(torch.randn(shape)) def forward(self): return self.weight * self.c class EqualizedLinear(nn.Module): """ <a id="equalized_linear"></a> ## Learning-rate Equalized Linear Layer This uses [learning-rate equalized weights](#equalized_weight) for a linear layer. """ def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `bias` is the bias initialization constant """ super().__init__() self.weight = EqualizedWeight([out_features, in_features]) self.bias = nn.Parameter(torch.ones(out_features) * bias) def forward(self, x: 'torch.Tensor'): return F.linear(x, self.weight(), bias=self.bias) class Conv2dWeightModulate(nn.Module): """ ### Convolution with Weight Modulation and Demodulation This layer scales the convolution weights by the style vector and demodulates them by normalizing. 
""" def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int', demodulate: 'float'=True, eps: 'float'=1e-08): """ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map * `kernel_size` is the size of the convolution kernel * `demodulate` is flag whether to normalize weights by its standard deviation * `eps` is the $\\epsilon$ for normalizing """ super().__init__() self.out_features = out_features self.demodulate = demodulate self.padding = (kernel_size - 1) // 2 self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size]) self.eps = eps def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `s` is style based scaling tensor of shape `[batch_size, in_features]` """ b, _, h, w = x.shape s = s[:, None, :, None, None] weights = self.weight()[None, :, :, :, :] weights = weights * s if self.demodulate: sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) weights = weights * sigma_inv x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.out_features, *ws) x = F.conv2d(x, weights, padding=self.padding, groups=b) return x.reshape(-1, self.out_features, h, w) class StyleBlock(nn.Module): """ <a id="style_block"></a> ### Style Block ![Style block](style_block.svg) *<small>$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).</small>* Style block has a weight modulation convolution layer. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) *<small>$A$ denotes a linear layer.</small>* Generates an RGB image from a feature map using $1 imes 1$ convolution. 
""" def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(1)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) class GeneratorBlock(nn.Module): """ <a id="generator_block"></a> ### Generator Block ![Generator block](generator_block.svg) *<small>$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is a single channel). [*toRGB*](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.</small>* The generator block consists of two [style blocks](#style_block) ($3 imes 3$ convolutions with style modulation) and an RGB output. """ def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.style_block1 = StyleBlock(d_latent, in_features, out_features) self.style_block2 = StyleBlock(d_latent, out_features, out_features) self.to_rgb = ToRGB(d_latent, out_features) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tuple of two noise tensors of shape `[batch_size, 1, height, width]` """ x = self.style_block1(x, w, noise[0]) x = self.style_block2(x, w, noise[1]) rgb = self.to_rgb(x, w) return x, rgb def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_latent': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np from torch import nn import torch.nn.functional as F import torch.utils.data from typing import Optional from typing import List import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 36 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r5 = rindex x0 = xindex % 4 r3 = rindex // 9 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 36 * x0), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), rmask & xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.16666666666666666 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(rmask & xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 36 * x4), tmp13, rmask & xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) tmp13 = tmp12 > tmp8 tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + x4, tmp13, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_mul_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp4 = tmp2 * tmp3 tmp5 = tmp0 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 0.0 tmp9 = tmp7 > tmp8 tmp10 = 0.2 tmp11 = tmp7 * tmp10 tmp12 = tl.where(tmp9, tmp7, tmp11) 
tl.store(in_out_ptr0 + x4, tmp12, xmask) @triton.jit def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 12 x0 = xindex % 4 x2 = xindex // 12 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_leaky_relu_leaky_relu_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = 0.0 tmp5 = tmp3 > tmp4 tmp6 = 0.2 tmp7 = tmp3 * tmp6 tmp8 = tl.where(tmp5, tmp3, tmp7) tmp9 = tmp8 > tmp4 tl.store(in_out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_12, (1,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (3, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_17, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf0 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf3, primals_6, buf1, buf4, 16, 36, XBLOCK=1, num_warps=2, num_stages=1) buf5 = extern_kernels.convolution(reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf5, (1, 16, 4, 4), (256, 16, 4, 1)) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_0[grid(16)](primals_9, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, primals_4, 
reinterpret_tensor(buf6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_10 buf8 = reinterpret_tensor(buf6, (4, 4, 1, 1, 1), (4, 1, 16, 16, 16), 0) del buf6 buf9 = reinterpret_tensor(buf8, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0) del buf8 buf10 = empty_strided_cuda((4, 4, 4, 3, 3), (144, 36, 9, 3, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_1[grid(16)](buf9, primals_11, buf7, buf10, 16, 36, XBLOCK=1, num_warps=2, num_stages=1) buf11 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_mul_2[grid(256)]( buf11, primals_7, primals_1, primals_8, buf20, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 del primals_8 buf12 = extern_kernels.convolution(reinterpret_tensor(buf11, (1, 16, 4, 4), (0, 16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), stride=(1, 1), padding=(1, 1), dilation= (1, 1), transposed=False, output_padding=(0, 0), groups=4, bias =None) assert_size_stride(buf12, (1, 16, 4, 4), (256, 16, 4, 1)) buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf12 triton_poi_fused_add_leaky_relu_mul_3[grid(256)](buf13, primals_12, primals_1, primals_13, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_12 del primals_13 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_mul_0[grid(16)](primals_14, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_14 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_15, primals_4, reinterpret_tensor( buf14, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del buf14 del primals_15 buf16 = empty_strided_cuda((4, 3, 4, 1, 1), (12, 4, 1, 1, 1), torch .float32) triton_poi_fused_mul_4[grid(48)](primals_16, buf15, buf16, 48, XBLOCK=64, num_warps=1, num_stages=1) buf17 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=( 1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None ) assert_size_stride(buf17, (1, 12, 4, 4), (192, 16, 4, 1)) buf18 = reinterpret_tensor(buf17, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf17 buf19 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.bool) triton_poi_fused_add_leaky_relu_leaky_relu_backward_5[grid(192)](buf18, primals_17, buf19, 192, XBLOCK=128, num_warps=4, num_stages=1) del primals_17 return (buf13, buf18, primals_4, primals_6, primals_11, primals_16, reinterpret_tensor(primals_1, (4,), (1,), 0), buf1, buf3, reinterpret_tensor(primals_5, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(primals_1, (4,), (1,), 4), buf7, buf9, reinterpret_tensor(buf10, (16, 4, 3, 3), (36, 9, 3, 1), 0), reinterpret_tensor(buf11, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf13, buf15, reinterpret_tensor(buf16, (12, 4, 1, 1), (4, 1, 1, 1), 0), buf19, buf20) class EqualizedWeight(nn.Module): """ <a id="equalized_weight"></a> ## Learning-rate Equalized Weights Parameter This is based on equalized learning rate introduced in the Progressive GAN paper. Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it. 
    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have
    an effect since optimizers such as Adam normalize them by a running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$.
    But the effective weights $w$ get updated proportionately to $c \\lambda$.
    Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weight) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int',
                 demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag indicating whether to normalize weights by their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is style based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)
        x = F.conv2d(x, weights, padding=self.padding, groups=b)
        return x.reshape(-1, self.out_features, h, w)


class StyleBlock(nn.Module):
    """
    <a id="style_block"></a>

    ### Style Block

    ![Style block](style_block.svg)

    *<small>$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is single channel).</small>*

    Style block has a weight modulation convolution layer.
""" def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0) self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3) self.scale_noise = nn.Parameter(torch.zeros(1)) self.bias = nn.Parameter(torch.zeros(out_features)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor', noise: 'Optional[torch.Tensor]'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` * `noise` is a tensor of shape `[batch_size, 1, height, width]` """ s = self.to_style(w) x = self.conv(x, s) if noise is not None: x = x + self.scale_noise[None, :, None, None] * noise return self.activation(x + self.bias[None, :, None, None]) class ToRGB(nn.Module): """ <a id="to_rgb"></a> ### To RGB ![To RGB](to_rgb.svg) *<small>$A$ denotes a linear layer.</small>* Generates an RGB image from a feature map using $1 imes 1$ convolution. """ def __init__(self, d_latent: 'int', features: 'int'): """ * `d_latent` is the dimensionality of $w$ * `features` is the number of features in the feature map """ super().__init__() self.to_style = EqualizedLinear(d_latent, features, bias=1.0) self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False) self.bias = nn.Parameter(torch.zeros(1)) self.activation = nn.LeakyReLU(0.2, True) def forward(self, x: 'torch.Tensor', w: 'torch.Tensor'): """ * `x` is the input feature map of shape `[batch_size, in_features, height, width]` * `w` is $w$ with shape `[batch_size, d_latent]` """ style = self.to_style(w) x = self.conv(x, style) return self.activation(x + self.bias[None, :, None, None]) class GeneratorBlockNew(nn.Module): """ <a id="generator_block"></a> ### Generator Block ![Generator block](generator_block.svg) *<small>$A$ denotes a linear layer. $B$ denotes a broadcast and scaling operation (noise is a single channel). [*toRGB*](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.</small>* The generator block consists of two [style blocks](#style_block) ($3 imes 3$ convolutions with style modulation) and an RGB output. 
""" def __init__(self, d_latent: 'int', in_features: 'int', out_features: 'int' ): """ * `d_latent` is the dimensionality of $w$ * `in_features` is the number of features in the input feature map * `out_features` is the number of features in the output feature map """ super().__init__() self.style_block1 = StyleBlock(d_latent, in_features, out_features) self.style_block2 = StyleBlock(d_latent, out_features, out_features) self.to_rgb = ToRGB(d_latent, out_features) def forward(self, input_0, input_1, input_2): primals_7 = self.style_block1.scale_noise primals_3 = self.style_block1.bias primals_8 = self.style_block1.to_style.bias primals_1 = self.style_block1.to_style.weight.weight primals_6 = self.style_block1.conv.weight.weight primals_12 = self.style_block2.scale_noise primals_10 = self.style_block2.bias primals_13 = self.style_block2.to_style.bias primals_2 = self.style_block2.to_style.weight.weight primals_11 = self.style_block2.conv.weight.weight primals_17 = self.to_rgb.bias primals_15 = self.to_rgb.to_style.bias primals_4 = self.to_rgb.to_style.weight.weight primals_16 = self.to_rgb.conv.weight.weight primals_5 = input_0 primals_9 = input_1 primals_14 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
HubBucket-Team/annotated_deep_learning_paper_implementations
GeneratorBlock
false
5,339
[ "MIT" ]
1
4a9716b01e336c57739dfdbdd90648276b53c433
https://github.com/HubBucket-Team/annotated_deep_learning_paper_implementations/tree/4a9716b01e336c57739dfdbdd90648276b53c433
import math
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
import torch.utils.data
from typing import Tuple
from typing import Optional
from typing import List
import torch.autograd


class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>

    ## Learning-rate Equalized Weights Parameter

    This is based on equalized learning rate introduced in the Progressive GAN paper.
    Instead of initializing weights at $\\mathcal{N}(0,c)$ they initialize weights
    to $\\mathcal{N}(0, 1)$ and then multiply them by $c$ when using it.

    $$w_i = c \\hat{w}_i$$

    The gradients on stored parameters $\\hat{w}$ get multiplied by $c$ but this doesn't have
    an effect since optimizers such as Adam normalize them by a running mean of the squared gradients.

    The optimizer updates on $\\hat{w}$ are proportionate to the learning rate $\\lambda$.
    But the effective weights $w$ get updated proportionately to $c \\lambda$.
    Without equalized learning rate, the effective weights will get updated proportionately to just $\\lambda$.

    So we are effectively scaling the learning rate by $c$ for these weight parameters.
    """

    def __init__(self, shape: 'List[int]'):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        self.weight = nn.Parameter(torch.randn(shape))

    def forward(self):
        return self.weight * self.c


class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>

    ## Learning-rate Equalized Linear Layer

    This uses [learning-rate equalized weights](#equalized_weight) for a linear layer.
    """

    def __init__(self, in_features: 'int', out_features: 'int', bias: 'float'=0.0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        self.weight = EqualizedWeight([out_features, in_features])
        self.bias = nn.Parameter(torch.ones(out_features) * bias)

    def forward(self, x: 'torch.Tensor'):
        return F.linear(x, self.weight(), bias=self.bias)


class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and demodulates them by normalizing.
    """

    def __init__(self, in_features: 'int', out_features: 'int', kernel_size: 'int',
                 demodulate: 'bool'=True, eps: 'float'=1e-08):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is a flag indicating whether to normalize weights by their standard deviation
        * `eps` is the $\\epsilon$ for normalizing
        """
        super().__init__()
        self.out_features = out_features
        self.demodulate = demodulate
        self.padding = (kernel_size - 1) // 2
        self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
        self.eps = eps

    def forward(self, x: 'torch.Tensor', s: 'torch.Tensor'):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is style based scaling tensor of shape `[batch_size, in_features]`
        """
        b, _, h, w = x.shape
        s = s[:, None, :, None, None]
        weights = self.weight()[None, :, :, :, :]
        weights = weights * s
        if self.demodulate:
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
            weights = weights * sigma_inv
        x = x.reshape(1, -1, h, w)
        _, _, *ws = weights.shape
        weights = # ... truncated (>4000 chars) for memory efficiency
RKDAngleLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/br/cbrq3mokok2a7y3prosu4acvjezfxctq5jmsx6qei4xhyhorsobg.py # Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.clamp_min] # Source node to ATen node mapping: # s => sub_1 # s_1 => clamp_min_1, pow_3, pow_4, sum_2 # Graph fragment: # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_2, %unsqueeze_3), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_4, 1e-12), kwargs = {}) triton_poi_fused_clamp_min_linalg_vector_norm_sub_0 = async_compile.triton('triton_poi_fused_clamp_min_linalg_vector_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_linalg_vector_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_min_linalg_vector_norm_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = 1e-12 tmp21 = triton_helpers.maximum(tmp19, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fq/cfq2cxt2zxwkadjog2cyjjj6y6tsw7uxcy3rlvnuuc2o4o63bnrq.py # Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # s => sub_1 # s_1 => div_1 # Graph fragment: # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_2, %unsqueeze_3), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %expand_1), kwargs = {}) triton_poi_fused_div_sub_1 = async_compile.triton('triton_poi_fused_div_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = (xindex // 16) x4 = (xindex // 4) x5 = xindex tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + (x5), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pu/cpuw5nxqsbhmwffwyyjmechnczskntdeind6rtdls4kec7dg4jgu.py # Topologically Sorted Source Nodes: [smooth_l1_loss], Original ATen: [aten.smooth_l1_loss] # Source node to ATen node mapping: # smooth_l1_loss => abs_1, div_2, lt, mean, mul, pow_5, sub_2, sub_3, where # Graph fragment: # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %view), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_5, 0.5), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_2, %sub_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) triton_per_fused_smooth_l1_loss_2 = async_compile.triton('triton_per_fused_smooth_l1_loss_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_smooth_l1_loss_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_smooth_l1_loss_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp15 = 64.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.clamp_min] stream0 = get_raw_stream(0) triton_poi_fused_clamp_min_linalg_vector_norm_sub_0.run(arg1_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [s, s_1], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_1.run(arg1_1, buf0, buf1, 64, grid=grid(64), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf1, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [t, t_1], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.clamp_min] triton_poi_fused_clamp_min_linalg_vector_norm_sub_0.run(arg0_1, buf3, 16, grid=grid(16), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [t, t_1], Original ATen: [aten.sub, aten.div] triton_poi_fused_div_sub_1.run(arg0_1, buf3, buf4, 64, grid=grid(64), stream=stream0) del arg0_1 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), out=buf5) del buf4 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [smooth_l1_loss], Original ATen: [aten.smooth_l1_loss] triton_per_fused_smooth_l1_loss_2.run(buf7, buf2, buf5, 1, 64, grid=grid(1), stream=stream0) del buf2 del buf5 return (buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class RKDAngleLoss(nn.Module): """ Module for calculating RKD Angle Loss """ def forward(self, teacher, student, normalize=True): """ Forward function :param teacher (torch.FloatTensor): Prediction made by the teacher model :param student (torch.FloatTensor): Prediction made by the student model :param normalize (bool): True if inputs need to be normalized """ with torch.no_grad(): t = teacher.unsqueeze(0) - teacher.unsqueeze(1) if normalize: t = F.normalize(t, p=2, dim=2) t = torch.bmm(t, t.transpose(1, 2)).view(-1) s = student.unsqueeze(0) - student.unsqueeze(1) if normalize: s = F.normalize(s, p=2, dim=2) s = torch.bmm(s, s.transpose(1, 2)).view(-1) return F.smooth_l1_loss(s, t) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
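# A hedged usage sketch: evaluating the loss eagerly on the sample inputs above.
if __name__ == '__main__':
    teacher, student = get_inputs()
    loss = RKDAngleLoss()(teacher, student)
    print(loss.item())  # scalar smooth-L1 distance between the two angle matrices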
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_min_linalg_vector_norm_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = libdevice.sqrt(tmp18) tmp20 = 1e-12 tmp21 = triton_helpers.maximum(tmp19, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex // 4 x5 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x5, tmp4, xmask) @triton.jit def triton_per_fused_smooth_l1_loss_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 1.0 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp8 * tmp4 tmp10 = tmp3 - tmp7 tmp11 = tl.where(tmp5, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp15 = 64.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) 
triton_poi_fused_clamp_min_linalg_vector_norm_sub_0[grid(16)](arg1_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sub_1[grid(64)](arg1_1, buf0, buf1, 64, XBLOCK =64, num_warps=1, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf1, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2) buf3 = buf0 del buf0 triton_poi_fused_clamp_min_linalg_vector_norm_sub_0[grid(16)](arg0_1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf1 del buf1 triton_poi_fused_div_sub_1[grid(64)](arg0_1, buf3, buf4, 64, XBLOCK =64, num_warps=1, num_stages=1) del arg0_1 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), out=buf5) del buf4 buf6 = empty_strided_cuda((), (), torch.float32) buf7 = buf6 del buf6 triton_per_fused_smooth_l1_loss_2[grid(1)](buf7, buf2, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 del buf5 return buf7, class RKDAngleLossNew(nn.Module): """ Module for calculating RKD Angle Loss """ def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
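# A hedged parity sketch (assumes a CUDA device): the compiled module should match an
# eager re-statement of RKD angle loss (smooth-L1 between Gram matrices of L2-normalized
# pairwise differences); `eager_rkd_angle` below is written here for the check, not
# taken from the record.
if __name__ == '__main__':
    import torch.nn.functional as F
    t_in = torch.rand(4, 4, device='cuda')
    s_in = torch.rand(4, 4, device='cuda')

    def eager_rkd_angle(teacher, student):
        t = F.normalize(teacher.unsqueeze(0) - teacher.unsqueeze(1), p=2, dim=2)
        t = torch.bmm(t, t.transpose(1, 2)).view(-1)
        s = F.normalize(student.unsqueeze(0) - student.unsqueeze(1), p=2, dim=2)
        s = torch.bmm(s, s.transpose(1, 2)).view(-1)
        return F.smooth_l1_loss(s, t)

    compiled = RKDAngleLossNew()(t_in, s_in)
    print(torch.allclose(compiled, eager_rkd_angle(t_in, s_in), atol=1e-05))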
Het-Shah/KD_Lib
RKDAngleLoss
false
5,340
[ "MIT" ]
1
5577250cf74e3a529033b244da9b2b9fcf7623a9
https://github.com/Het-Shah/KD_Lib/tree/5577250cf74e3a529033b244da9b2b9fcf7623a9
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): """ Module for calculating RKD Angle Loss """ def forward(self, teacher, student, normalize=True): """ Forward function :param teacher (torch.FloatTensor): Prediction made by the teacher model :param student (torch.FloatTensor): Prediction made by the student model :param normalize (bool): True if inputs need to be normalized """ with torch.no_grad(): t = teacher.unsqueeze(0) - teacher.unsqueeze(1) if normalize: t = F.normalize(t, p=2, dim=2) t = torch.bmm(t, t.transpose(1, 2)).view(-1) s = student.unsqueeze(0) - student.unsqueeze(1) if normalize: s = F.normalize(s, p=2, dim=2) s = torch.bmm(s, s.transpose(1, 2)).view(-1) return F.smooth_l1_loss(s, t) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return []
WingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/he/chezcq52je3oj7dk5o35h35bnfztpvzmvp3caddzbgyjw55sisqe.py # Topologically Sorted Source Nodes: [sub, diff, log_mask, like_l1_mask], Original ATen: [aten.sub, aten.abs, aten.lt, aten.bitwise_not] # Source node to ATen node mapping: # diff => abs_1 # like_l1_mask => bitwise_not # log_mask => lt # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 10), kwargs = {}) # %bitwise_not : [num_users=1] = call_function[target=torch.ops.aten.bitwise_not.default](args = (%lt,), kwargs = {}) triton_poi_fused_abs_bitwise_not_lt_sub_0 = async_compile.triton('triton_poi_fused_abs_bitwise_not_lt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_bitwise_not_lt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_bitwise_not_lt_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 10.0 tmp5 = tmp3 < tmp4 tmp6 = tmp5 == 0 tl.store(out_ptr0 + (x0), tmp3, xmask) tl.store(out_ptr1 + (x0), tmp5, xmask) tl.store(out_ptr2 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [sub, diff, log_mask, like_l1_mask], Original ATen: [aten.sub, aten.abs, aten.lt, aten.bitwise_not] stream0 = get_raw_stream(0) triton_poi_fused_abs_bitwise_not_lt_sub_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch def identity(x): return x class WingLoss(torch.nn.Module): def __init__(self, w: 'float'=10, eps: 'float'=2, reduction: 'str'='mean' ) ->None: assert reduction is None or reduction in ('mean', 'sum') super().__init__() self._w = w self._eps = eps self._constant = self._w * (1 - math.log(1 + self._w / self._eps)) if reduction is None: self._reduction_fn = identity elif reduction == 'mean': self._reduction_fn = torch.mean elif reduction == 'sum': self._reduction_fn = torch.sum def forward(self, predicted: 'torch.tensor', target: 'torch.tensor'): """Compute wing loss Predicted and target have size batch_size x 2 * num_landmarks """ diff = torch.abs(predicted - target) log_mask = diff < self._w like_l1_mask = ~log_mask diff[log_mask] = self._w * torch.log(1 + diff[log_mask] / self._eps) diff[like_l1_mask] -= self._constant loss_by_sample = diff.sum(dim=1) return self._reduction_fn(loss_by_sample) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
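# A hedged worked example of the two branches with the defaults w=10, eps=2: diffs below
# w take the log branch w * log(1 + diff / eps); diffs at or above w take diff - constant,
# where constant = w * (1 - log(1 + w / eps)).
if __name__ == '__main__':
    criterion = WingLoss()
    pred = torch.zeros(2, 4)
    target = torch.tensor([[1.0, 2.0, 3.0, 4.0],       # all |diffs| < 10 -> log branch
                           [20.0, 30.0, 40.0, 50.0]])  # all |diffs| >= 10 -> linear branch
    print(criterion(pred, target).item())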
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_bitwise_not_lt_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 10.0 tmp5 = tmp3 < tmp4 tmp6 = tmp5 == 0 tl.store(out_ptr0 + x0, tmp3, xmask) tl.store(out_ptr1 + x0, tmp5, xmask) tl.store(out_ptr2 + x0, tmp6, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_abs_bitwise_not_lt_sub_0[grid(256)](arg0_1, arg1_1, buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1, buf2 def identity(x): return x class WingLossNew(torch.nn.Module): def __init__(self, w: 'float'=10, eps: 'float'=2, reduction: 'str'='mean' ) ->None: assert reduction is None or reduction in ('mean', 'sum') super().__init__() self._w = w self._eps = eps self._constant = self._w * (1 - math.log(1 + self._w / self._eps)) if reduction is None: self._reduction_fn = identity elif reduction == 'mean': self._reduction_fn = torch.mean elif reduction == 'sum': self._reduction_fn = torch.sum def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
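# A hedged observation sketch (assumes a CUDA device): the compiled graph above covers
# only the data-independent prologue (sub/abs plus the two branch masks), so
# WingLossNew.forward returns output[0] = |pred - target| rather than the full loss.
if __name__ == '__main__':
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    diff = WingLossNew()(a, b)
    print(torch.allclose(diff, (a - b).abs()))  # expected: True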
Ilyabasharov/made_mail.ru
WingLoss
false
5,341
[ "MIT" ]
1
a81bfd874ab80eb8c7eaad8a4acf723f327f2f50
https://github.com/Ilyabasharov/made_mail.ru/tree/a81bfd874ab80eb8c7eaad8a4acf723f327f2f50
import math import torch def identity(x): return x class Model(torch.nn.Module): def __init__(self, w: 'float'=10, eps: 'float'=2, reduction: 'str'='mean' ) ->None: assert reduction is None or reduction in ('mean', 'sum') super().__init__() self._w = w self._eps = eps self._constant = self._w * (1 - math.log(1 + self._w / self._eps)) if reduction is None: self._reduction_fn = identity elif reduction == 'mean': self._reduction_fn = torch.mean elif reduction == 'sum': self._reduction_fn = torch.sum def forward(self, predicted: 'torch.tensor', target: 'torch.tensor'): """Compute wing loss Predicted and target have size batch_size x 2 * num_landmarks """ diff = torch.abs(predicted - target) log_mask = diff < self._w like_l1_mask = ~log_mask diff[log_mask] = self._w * torch.log(1 + diff[log_mask] / self._eps) diff[like_l1_mask] -= self._constant loss_by_sample = diff.sum(dim=1) return self._reduction_fn(loss_by_sample) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
SPPLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/4m/c4m5w4y7qnqrt7o32xjvwkhlxdmbadwmibuly55v4t4jrg6szca5.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # x => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%arg0_1, [0, 0, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile 
def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class SPPLayer(torch.nn.Module): def __init__(self, level): super(SPPLayer, self).__init__() self.level = level def forward(self, x): _n, _c, _h, _w = x.size() a = 6 + (self.level - 1) * -2 zero_pad = torch.nn.ZeroPad2d((a, a, a, a)) x = zero_pad(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'level': 4}]
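# A hedged worked example of the padding arithmetic a = 6 + (level - 1) * (-2):
# level 1 -> a = 6, level 2 -> a = 4, level 3 -> a = 2, level 4 -> a = 0 (identity pad).
# Each spatial dimension grows by 2 * a.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    for level in (1, 2, 3, 4):
        print(level, SPPLayer(level)(x).shape)
    # torch.Size([4, 4, 16, 16]) / [4, 4, 12, 12] / [4, 4, 8, 8] / [4, 4, 4, 4]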
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SPPLayerNew(torch.nn.Module): def __init__(self, level): super(SPPLayerNew, self).__init__() self.level = level def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
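# A hedged parity sketch (assumes a CUDA device): with level=4 the padding amount is
# a = 6 + 3 * (-2) = 0, so the fused kernel above reduces to an elementwise copy.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = SPPLayerNew(level=4)(x)
    print(torch.equal(out, x))  # expected: True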
IrisDinge/YoloV3_DOTA
SPPLayer
false
5,342
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch class Model(torch.nn.Module): def __init__(self, level): super().__init__() self.level = level def forward(self, x): _n, _c, _h, _w = x.size() a = 6 + (self.level - 1) * -2 zero_pad = torch.nn.ZeroPad2d((a, a, a, a)) x = zero_pad(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
AttentiveNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yd/cydbtjoq352gcolmflbvu2nqkda7xg7q5hnvltb47jsg5dbmubym.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] # Source node to ATen node mapping: # x => clone # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 
y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x1 => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%clone, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ct/cctfh6tyid75qkwqsfny7tvowttt2qc75qajnfjptl4jnmnqmyyn.py # Topologically Sorted Source Nodes: [x_2, out, out_1], Original ATen: [aten.clone, aten.mean, aten.relu] # Source node to ATen node mapping: # out => mean # out_1 => relu # x_2 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%clone_1, [1]), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mean,), kwargs = {}) triton_poi_fused_clone_mean_relu_2 = async_compile.triton('triton_poi_fused_clone_mean_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mean_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mean_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/e3/ce3zq3if75pjpbnxucmwjdn7zpbggif47kgvtqphxu6ixgwdw6n2.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.sigmoid, aten.sigmoid_backward] # Source node to ATen node mapping: # out_2 => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {}) triton_poi_fused_sigmoid_sigmoid_backward_3 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, out, out_1], Original ATen: [aten.clone, aten.mean, aten.relu] triton_poi_fused_clone_mean_relu_2.run(buf3, primals_5, buf4, 16, grid=grid(16), stream=stream0) del buf3 del primals_5 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), out=buf5) buf6 = buf5; del buf5 # 
reuse buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.sigmoid, aten.sigmoid_backward] triton_poi_fused_sigmoid_sigmoid_backward_3.run(buf6, primals_7, buf7, 4, grid=grid(4), stream=stream0) del primals_7 return (reinterpret_tensor(buf6, (4, ), (1, ), 0), primals_2, primals_4, buf0, buf2, buf4, buf7, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
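A side note on the last kernel above: triton_poi_fused_sigmoid_sigmoid_backward_3 stores two buffers at once, the forward value s = sigmoid(x + bias) and the product s * (1 - s), which is exactly the derivative Inductor saves for the backward pass (hence "sigmoid_backward" in the kernel name). A one-line sanity check of that identity in plain PyTorch, as a sketch:

import torch

x = torch.randn(4, requires_grad=True)
s = torch.sigmoid(x)
s.sum().backward()
# d/dx sigmoid(x) == s * (1 - s), the second value the kernel writes out
torch.testing.assert_close(x.grad, (s * (1 - s)).detach())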
import torch from torch import nn import torch.nn.functional as F class AttentiveNet(nn.Module): def __init__(self, input_size, hidden_size) ->None: super().__init__() self.cov2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=3, padding=1) self.cov1 = nn.Conv1d(input_size, hidden_size, kernel_size=1, padding=0 ) self.dense = nn.Linear(hidden_size, 1) def forward(self, x): x = x.permute(0, 2, 1).contiguous() x1 = self.cov1(x) x = self.cov2(x1) x = x.permute(0, 2, 1).contiguous() out = torch.mean(x, dim=1) out = F.relu(out) out = F.sigmoid(self.dense(out)) return out.squeeze() def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
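How a record like this is exercised (a sketch; the driver name run_record is hypothetical, not part of the dataset): get_init_inputs() supplies the constructor args and kwargs, and get_inputs() supplies the forward inputs. Note that F.sigmoid in the forward above is deprecated in favor of torch.sigmoid but still works.

import torch

def run_record():
    init_args, init_kwargs = get_init_inputs()
    model = AttentiveNet(*init_args, **init_kwargs)
    out = model(*get_inputs())
    # mean over dim=1, relu, dense(hidden -> 1), sigmoid, squeeze -> shape (4,)
    assert out.shape == (4,)
    return out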
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_clone_mean_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tmp7 = tmp6 + tmp1 tmp8 = tmp5 + tmp7 tmp10 = tmp9 + tmp1 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, 
buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_clone_mean_relu_2[grid(16)](buf3, primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf3 del primals_5 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (4, 1), (1, 4 ), 0), out=buf5) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32) triton_poi_fused_sigmoid_sigmoid_backward_3[grid(4)](buf6, primals_7, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_7 return reinterpret_tensor(buf6, (4,), (1,), 0 ), primals_2, primals_4, buf0, buf2, buf4, buf7, primals_6 class AttentiveNetNew(nn.Module): def __init__(self, input_size, hidden_size) ->None: super().__init__() self.cov2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=3, padding=1) self.cov1 = nn.Conv1d(input_size, hidden_size, kernel_size=1, padding=0 ) self.dense = nn.Linear(hidden_size, 1) def forward(self, input_0): primals_4 = self.cov2.weight primals_3 = self.cov2.bias primals_2 = self.cov1.weight primals_5 = self.cov1.bias primals_6 = self.dense.weight primals_7 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
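The *New wrapper above is the standard shape of these records: it unpacks the module's parameters into the positional list that call() expects and returns only the user-visible output (the remaining return values are buffers kept alive for the backward pass). A sketch of an equivalence check between the eager module and the wrapper (CUDA required; tolerances are loose because fused kernels may reorder floating-point reductions):

import torch

eager = AttentiveNet(input_size=4, hidden_size=4).cuda()
compiled = AttentiveNetNew(input_size=4, hidden_size=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share identical weights
x = torch.rand(4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)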
ISYSLAB-HUST/DeepNeuropePred
AttentiveNet
false
5344
[ "MIT" ]
1
f87f36fdbbc966f727eb063a0f9984850294ba37
https://github.com/ISYSLAB-HUST/DeepNeuropePred/tree/f87f36fdbbc966f727eb063a0f9984850294ba37
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, input_size, hidden_size) ->None: super().__init__() self.cov2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=3, padding=1) self.cov1 = nn.Conv1d(input_size, hidden_size, kernel_size=1, padding=0 ) self.dense = nn.Linear(hidden_size, 1) def forward(self, x): x = x.permute(0, 2, 1).contiguous() x1 = self.cov1(x) x = self.cov2(x1) x = x.permute(0, 2, 1).contiguous() out = torch.mean(x, dim=1) out = F.relu(out) out = F.sigmoid(self.dense(out)) return out.squeeze() def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
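A hedged sketch of how output like the Triton dump above is typically produced: compiling the eager Model with the Inductor backend (requires a CUDA build of PyTorch; setting TORCH_COMPILE_DEBUG=1 makes Inductor write the generated kernels to disk for inspection).

import torch

def compile_and_check():
    model = Model(input_size=4, hidden_size=4).cuda()
    compiled = torch.compile(model, backend='inductor')
    x = torch.rand([4, 4, 4], device='cuda')
    # compiled and eager paths should agree up to fusion-induced rounding
    torch.testing.assert_close(compiled(x), model(x), rtol=1e-4, atol=1e-4)
    return compiled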
CReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/zz/czzkkvezj7bfphq4ju7vkoi3v7pdundv4ciwowwf6wyjdxvj442e.py # Topologically Sorted Source Nodes: [x1, y, y_1, y_2], Original ATen: [aten.cat, aten.mul, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x1 => cat # y => mul # y_1 => add # y_2 => relu # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %neg], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, %primals_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_cat_mul_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_cat_mul_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_mul_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cat_mul_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp14 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tmp20 = 0.0 tmp21 = tmp19 <= tmp20 tl.store(out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr1 + (x3), tmp19, xmask) tl.store(out_ptr2 + (x3), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_3, (1, 8, 1, 1), (8, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x1, y, y_1, y_2], Original ATen: [aten.cat, aten.mul, aten.add, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_cat_mul_relu_threshold_backward_0.run(primals_1, primals_2, primals_3, buf0, buf1, buf2, 512, grid=grid(512), stream=stream0) del primals_1 del primals_2 del primals_3 return (buf1, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class CReLU(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(2 * nchannels) self.relu = nn.ReLU(inplace=True) self.in_channels = nchannels self.out_channels = 2 * nchannels def forward(self, x): x1 = torch.cat((x, -x), 1) x2 = self.scale(x1) y = self.relu(x2) return y def __repr__(self): s = '{name} ({in_channels}, {out_channels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
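A quick property check for the module above (a sketch; it relies on Scale's default initialization, weight filled with 1.0 and bias with 0.0, under which CReLU reduces to concatenating the positive and negative half-rectifications):

import torch

m = CReLU(nchannels=4)
x = torch.rand(4, 4, 4, 4)
y = m(x)
# relu(cat(x, -x)) == cat(relu(x), relu(-x)) when the scale is the identity
torch.testing.assert_close(y, torch.cat((torch.relu(x), torch.relu(-x)), dim=1))
assert y.shape == (4, 8, 4, 4)  # channel count doubles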
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_cat_mul_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = -tmp9 tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype) tmp12 = tl.where(tmp6, tmp10, tmp11) tmp13 = tl.where(tmp4, tmp5, tmp12) tmp15 = tmp13 * tmp14 tmp17 = tmp15 + tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = triton_helpers.maximum(tmp18, tmp17) tmp20 = 0.0 tmp21 = tmp19 <= tmp20 tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp19, xmask) tl.store(out_ptr2 + x3, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_3, (1, 8, 1, 1), (8, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_cat_mul_relu_threshold_backward_0[grid(512)]( primals_1, primals_2, primals_3, buf0, buf1, buf2, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 del primals_2 del primals_3 return buf1, buf0, buf2 class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class CReLUNew(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(2 * nchannels) self.relu = nn.ReLU(inplace=True) self.in_channels = nchannels self.out_channels = 2 * nchannels def __repr__(self): s = '{name} ({in_channels}, {out_channels})' return s.format(name=self.__class__.__name__, **self.__dict__) def forward(self, input_0): primals_2 = self.scale.weight primals_3 = self.scale.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
IrisDinge/YoloV3_DOTA
CReLU
false
5345
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class Model(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(2 * nchannels) self.relu = nn.ReLU(inplace=True) self.in_channels = nchannels self.out_channels = 2 * nchannels def forward(self, x): x1 = torch.cat((x, -x), 1) x2 = self.scale(x1) y = self.relu(x2) return y def __repr__(self): s = '{name} ({in_channels}, {out_channels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
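For reference, the three buffers the fused kernel returns map onto eager operations as follows (a sketch; buf0/buf1/buf2 are the names used in call() above, and le is the ReLU mask Inductor saves for threshold_backward):

import torch

def fused_crelu_reference(x, weight, bias):
    x1 = torch.cat((x, -x), dim=1)       # buf0: the concatenation
    y = torch.relu(x1 * weight + bias)   # buf1: the activation (user-visible output)
    le = y <= 0                          # buf2: the mask kept for the backward pass
    return y, x1, le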
InferenceBatchSoftmax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from itertools import product as product from math import sqrt as sqrt from torch.nn import init as init from torch.nn import functional as F class InferenceBatchSoftmax(nn.Module): def __init__(self): super(InferenceBatchSoftmax, self).__init__() @staticmethod def forward(input_): return F.softmax(input_, dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
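The two kernels in this record implement the standard numerically stable softmax in two passes; the same decomposition in plain PyTorch, as a sketch:

import torch

def two_pass_softmax(x, dim=-1):
    shifted = x - x.amax(dim=dim, keepdim=True)  # pass 1: subtract the row max
    e = shifted.exp()                            # pass 1: exponentiate
    return e / e.sum(dim=dim, keepdim=True)      # pass 2: normalize

x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(two_pass_softmax(x), torch.softmax(x, dim=-1))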
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from itertools import product as product from math import sqrt as sqrt from torch.nn import init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, class InferenceBatchSoftmaxNew(nn.Module): def __init__(self): super(InferenceBatchSoftmaxNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
IncyLiu/autokeras
InferenceBatchSoftmax
false
5346
[ "MIT" ]
1
e9dbf66b005e2ffaabe29bc366bb4e72fa79add8
https://github.com/IncyLiu/autokeras/tree/e9dbf66b005e2ffaabe29bc366bb4e72fa79add8
import torch import torch.nn as nn from itertools import product as product from math import sqrt as sqrt from torch.nn import init as init from torch.nn import functional as F class Model(nn.Module): def __init__(self): super().__init__() @staticmethod def forward(input_): return F.softmax(input_, dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
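The benchmark harness shipped with the record can be driven directly; a minimal invocation (a sketch: CUDA is required since every buffer is allocated with empty_strided_cuda, and timings will vary by GPU):

import torch

if torch.cuda.is_available():
    # times/repeat control the measurement loop; small values keep it quick
    benchmark_compiled_module(times=5, repeat=3)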
ScaledDotProductAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mr/cmr7a6ykanzkwpcsrqlcnz2nradw5cvqpademfs4557bpgql5b44.py # Topologically Sorted Source Nodes: [attn_1, mul, result, result_1, sum_1], Original ATen: [aten.div, aten.mul, aten._softmax, aten.sum] # Source node to ATen node mapping: # attn_1 => div # mul => mul # result => amax, div_1, exp, sub, sum_1 # result_1 => mul_1 # sum_1 => sum_2 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 4), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg2_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %arg2_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2], True), kwargs = {}) triton_poi_fused__softmax_div_mul_sum_0 = async_compile.triton('triton_poi_fused__softmax_div_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 
2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 * tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 * tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp30 tmp32 = tmp31 * tmp3 tmp33 = tmp23 / tmp30 tmp34 = tmp33 * tmp7 tmp35 = tmp32 + tmp34 tmp36 = tmp26 / tmp30 tmp37 = tmp36 * tmp12 tmp38 = tmp35 + tmp37 tmp39 = tmp29 / tmp30 tmp40 = tmp39 * tmp17 tmp41 = tmp38 + tmp40 tl.store(out_ptr0 + (x0), tmp19, xmask) tl.store(out_ptr1 + (x0), tmp30, xmask) tl.store(out_ptr2 + (x0), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cg/ccgvbcuuaussyqitdvez74m3h3ffsx647g73pwhsr5ylvmy3s67d.py # Topologically Sorted Source Nodes: [attn_1, mul, result, result_1, add, result_2], Original ATen: [aten.div, aten.mul, aten._softmax, aten.add] # Source node to ATen node mapping: # add => add # attn_1 => div # mul => mul # result => amax, div_1, exp, sub # result_1 => mul_1 # result_2 => div_2 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 4), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg2_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), 
kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %arg2_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-13), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {}) triton_poi_fused__softmax_add_div_mul_1 = async_compile.triton('triton_poi_fused__softmax_add_div_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (x2), xmask) tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = tmp9 * tmp3 tmp12 = 1e-13 tmp13 = tmp11 + tmp12 tmp14 = tmp10 / tmp13 tl.store(in_out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # 
Topologically Sorted Source Nodes: [attn_1, mul, result, result_1, sum_1], Original ATen: [aten.div, aten.mul, aten._softmax, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused__softmax_div_mul_sum_0.run(buf0, arg2_1, buf1, buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_1, mul, result, result_1, add, result_2], Original ATen: [aten.div, aten.mul, aten._softmax, aten.add] triton_poi_fused__softmax_add_div_mul_1.run(buf4, arg2_1, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) del arg2_1 del buf1 del buf2 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, arg3_1, out=buf5) del arg3_1 return (buf5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int' =-1, memory_efficient: 'bool'=False, mask_fill_value: 'float'=-1e+32 ) ->torch.Tensor: """ https://github.com/allenai/allennlp/blob/b6cc9d39651273e8ec2a7e334908ffa9de5c2026/allennlp/nn/util.py#L231 ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. If ``memory_efficient`` is set to true, we will simply use a very large negative number for those masked positions so that the probabilities of those positions would be approximately 0. This is not accurate in math, but works for most cases and consumes less memory. In the case that the input vector is completely masked and ``memory_efficient`` is false, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function will treat every element as equal, and do softmax over equal numbers. """ if mask is None: result = torch.nn.functional.softmax(vector, dim=dim) else: mask = mask.float() while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) if not memory_efficient: result = torch.nn.functional.softmax(vector * mask, dim=dim) result = result * mask result = result / (result.sum(dim=dim, keepdim=True) + 1e-13) else: masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value) result = torch.nn.functional.softmax(masked_vector, dim=dim) return result class ScaledDotProductAttention(torch.nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = torch.nn.Dropout(attn_dropout) def forward(self, q, k, v, mask): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature attn = masked_softmax(attn, mask, 2) __attn = self.dropout(attn) output = torch.bmm(__attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'temperature': 4}]
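A worked example of the default (non-memory_efficient) path in masked_softmax above: softmax over vector * mask, re-mask, then renormalize, so masked positions end up exactly zero and the surviving ones sum to ~1 (the 1e-13 term guards against division by zero on a fully masked row). A sketch:

import torch

v = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
m = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
p = masked_softmax(v, m, dim=-1)
assert torch.equal(p[:, 2:], torch.zeros(1, 2))           # masked out exactly
assert torch.allclose(p[:, :2].sum(), torch.tensor(1.0))  # renormalized to ~1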
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_div_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 * tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 * tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp30 tmp32 = tmp31 * tmp3 tmp33 = tmp23 / tmp30 tmp34 = tmp33 * tmp7 tmp35 = tmp32 + tmp34 tmp36 = tmp26 / tmp30 tmp37 = tmp36 * tmp12 tmp38 = tmp35 + tmp37 tmp39 = tmp29 / tmp30 tmp40 = tmp39 * tmp17 tmp41 = tmp38 + tmp40 tl.store(out_ptr0 + x0, tmp19, xmask) tl.store(out_ptr1 + x0, tmp30, xmask) tl.store(out_ptr2 + x0, tmp41, xmask) @triton.jit def triton_poi_fused__softmax_add_div_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + x2, xmask) tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = tmp9 * tmp3 tmp12 = 1e-13 tmp13 = tmp11 + tmp12 tmp14 = tmp10 / tmp13 tl.store(in_out_ptr0 + x2, tmp14, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, 
reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_div_mul_sum_0[grid(16)](buf0, arg2_1, buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = buf0 del buf0 triton_poi_fused__softmax_add_div_mul_1[grid(64)](buf4, arg2_1, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg2_1 del buf1 del buf2 del buf3 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf4, arg3_1, out=buf5) del arg3_1 return buf5, buf4 def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int' =-1, memory_efficient: 'bool'=False, mask_fill_value: 'float'=-1e+32 ) ->torch.Tensor: """ https://github.com/allenai/allennlp/blob/b6cc9d39651273e8ec2a7e334908ffa9de5c2026/allennlp/nn/util.py#L231 ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. If ``memory_efficient`` is set to true, we will simply use a very large negative number for those masked positions so that the probabilities of those positions would be approximately 0. This is not accurate in math, but works for most cases and consumes less memory. In the case that the input vector is completely masked and ``memory_efficient`` is false, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function will treat every element as equal, and do softmax over equal numbers. """ if mask is None: result = torch.nn.functional.softmax(vector, dim=dim) else: mask = mask.float() while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) if not memory_efficient: result = torch.nn.functional.softmax(vector * mask, dim=dim) result = result * mask result = result / (result.sum(dim=dim, keepdim=True) + 1e-13) else: masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value) result = torch.nn.functional.softmax(masked_vector, dim=dim) return result class ScaledDotProductAttentionNew(torch.nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = torch.nn.Dropout(attn_dropout) def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
IouJenLiu/AFK
ScaledDotProductAttention
false
5,347
[ "MIT" ]
1
db2b47bb3a5614b61766114b87f143e4a61a4a8d
https://github.com/IouJenLiu/AFK/tree/db2b47bb3a5614b61766114b87f143e4a61a4a8d
import torch def masked_softmax(vector: 'torch.Tensor', mask: 'torch.Tensor', dim: 'int' =-1, memory_efficient: 'bool'=False, mask_fill_value: 'float'=-1e+32 ) ->torch.Tensor: """ https://github.com/allenai/allennlp/blob/b6cc9d39651273e8ec2a7e334908ffa9de5c2026/allennlp/nn/util.py#L231 ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be masked. This performs a softmax on just the non-masked portions of ``vector``. Passing ``None`` in for the mask is also acceptable; you'll just get a regular softmax. ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function. If ``memory_efficient`` is set to true, we will simply use a very large negative number for those masked positions so that the probabilities of those positions would be approximately 0. This is not accurate in math, but works for most cases and consumes less memory. In the case that the input vector is completely masked and ``memory_efficient`` is false, this function returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function will treat every element as equal, and do softmax over equal numbers. """ if mask is None: result = torch.nn.functional.softmax(vector, dim=dim) else: mask = mask.float() while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) if not memory_efficient: result = torch.nn.functional.softmax(vector * mask, dim=dim) result = result * mask result = result / (result.sum(dim=dim, keepdim=True) + 1e-13) else: masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value) result = torch.nn.functional.softmax(masked_vector, dim=dim) return result class Model(torch.nn.Module): """ Scaled Dot-Product Attention """ def __init__(self, temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature self.dropout = torch.nn.Dropout(attn_dropout) def forward(self, q, k, v, mask): attn = torch.bmm(q, k.transpose(1, 2)) attn = attn / self.temperature attn = masked_softmax(attn, mask, 2) __attn = self.dropout(attn) output = torch.bmm(__attn, v) return output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
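As a quick sanity check on the masked-softmax semantics reused throughout this entry, here is a small illustrative snippet (my own, not part of the original repo): masked positions end up with exactly zero probability after the re-masking step, and a `None` mask degrades to a plain softmax.

import torch

vector = torch.tensor([[1.0, 2.0, 3.0]])
mask = torch.tensor([[1.0, 1.0, 0.0]])

probs = masked_softmax(vector, mask)
# The re-masking step zeroes the masked slot exactly, and the row
# renormalizes over the surviving entries, so it still sums to ~1.
assert probs[0, 2] == 0.0
assert torch.allclose(probs.sum(-1), torch.ones(1), atol=1e-5)

# With mask=None the helper is just a regular softmax.
assert torch.allclose(masked_softmax(vector, None),
                      torch.softmax(vector, dim=-1))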
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ai/caitnpldotnv4k4oj67wyecd2ig4qcjrnmr35rmx6o2vxx245xs3.py # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] # Source node to ATen node mapping: # h_relu => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 0), kwargs = {}) triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) 
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_clamp_ge_0.run(buf0, primals_2, buf1, buf3, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class Net(torch.nn.Module): """Implementing two layer nn.""" def __init__(self, D_IN, H, D_OUT): super().__init__() self.linear1 = torch.nn.Linear(D_IN, H) self.linear2 = torch.nn.Linear(H, D_OUT) def forward(self, x): h = self.linear1(x) h_relu = torch.clamp(h, 0) y_pred = self.linear2(h_relu) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'D_IN': 4, 'H': 4, 'D_OUT': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3 class NetNew(torch.nn.Module): """Implementing two layer nn.""" def __init__(self, D_IN, H, D_OUT): super().__init__() self.linear1 = torch.nn.Linear(D_IN, H) self.linear2 = torch.nn.Linear(H, D_OUT) def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ImadDabbura/deep_learning_with_pytorch
Net
false
5,348
[ "MIT" ]
1
0cac0614ab08b30654de192e540048cf4243a4e4
https://github.com/ImadDabbura/deep_learning_with_pytorch/tree/0cac0614ab08b30654de192e540048cf4243a4e4
import torch class Model(torch.nn.Module): """Implementing two layer nn.""" def __init__(self, D_IN, H, D_OUT): super().__init__() self.linear1 = torch.nn.Linear(D_IN, H) self.linear2 = torch.nn.Linear(H, D_OUT) def forward(self, x): h = self.linear1(x) h_relu = torch.clamp(h, 0) y_pred = self.linear2(h_relu) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
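Since `torch.clamp(h, 0)` is exactly ReLU, the two-layer net above can be sanity-checked against an explicit ReLU composition. A minimal sketch (mine, assuming only the classes defined in this entry; runs on CPU):

import torch

torch.manual_seed(0)
net = Net(D_IN=4, H=4, D_OUT=4)
x = torch.rand(4, 4, 4, 4)
# clamp-at-zero and relu are the same function, so both paths must agree.
expected = net.linear2(torch.relu(net.linear1(x)))
assert torch.allclose(net(x), expected)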
XSigmoidLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/of/cofynvv2vkexy5uq2apowq2ccvicws45zrrwqqzypwoqaac7hq2g.py # Topologically Sorted Source Nodes: [ey_t, mul, neg, exp, add, truediv, sub_1, mean], Original ATen: [aten.sub, aten.mul, aten.neg, aten.exp, aten.add, aten.div, aten.mean] # Source node to ATen node mapping: # add => add # exp => exp # ey_t => sub # mean => mean # mul => mul # neg => neg # sub_1 => sub_1 # truediv => div # Graph fragment: # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %sub), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) triton_per_fused_add_div_exp_mean_mul_neg_sub_0 = async_compile.triton('triton_per_fused_add_div_exp_mean_mul_neg_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_mean_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_exp_mean_mul_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = -tmp2 tmp6 = tl_math.exp(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp4 / tmp8 tmp10 = tmp9 - tmp2 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [ey_t, mul, neg, exp, add, truediv, sub_1, mean], Original ATen: [aten.sub, aten.mul, aten.neg, aten.exp, aten.add, aten.div, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_div_exp_mean_mul_neg_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class XSigmoidLoss(nn.Module): def __init__(self): super().__init__() def forward(self, y_t, y_prime_t): ey_t = y_t - y_prime_t return torch.mean(2 * ey_t / (1 + torch.exp(-ey_t)) - ey_t) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_exp_mean_mul_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = -tmp2 tmp6 = tl_math.exp(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp4 / tmp8 tmp10 = tmp9 - tmp2 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_exp_mean_mul_neg_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class XSigmoidLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
IshanBaliyan/DEEP-TFM_with_cGAN
XSigmoidLoss
false
5,349
[ "MIT" ]
1
8d711c025367031197e5b8c7c768fc9fbea406ce
https://github.com/IshanBaliyan/DEEP-TFM_with_cGAN/tree/8d711c025367031197e5b8c7c768fc9fbea406ce
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, y_t, y_prime_t): ey_t = y_t - y_prime_t return torch.mean(2 * ey_t / (1 + torch.exp(-ey_t)) - ey_t) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
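The x-sigmoid penalty has a closed form worth noting: for an error e, 2*e/(1+exp(-e)) - e equals e*tanh(e/2), an even, smooth surrogate for |e|. A small illustrative check of the identity (not from the repo):

import torch

e = torch.linspace(-5.0, 5.0, steps=101)
lhs = 2 * e / (1 + torch.exp(-e)) - e
# tanh(e/2) = (1 - exp(-e)) / (1 + exp(-e)), hence the identity below.
assert torch.allclose(lhs, e * torch.tanh(e / 2), atol=1e-6)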
PLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/q5/cq53owht3av4rk5e7vhqlmaw5feoc27pex32srvhywpvlaia56po.py # Topologically Sorted Source Nodes: [add, mul, x1, sub_1, mul_1, x2, min1, min2], Original ATen: [aten.add, aten.mul, aten.sub, aten.minimum, aten.maximum] # Source node to ATen node mapping: # add => add # min1 => minimum # min2 => maximum # mul => mul # mul_1 => mul_1 # sub_1 => sub_1 # x1 => sub # x2 => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 0.1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%add_1, %arg0_1), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%sub, %minimum), kwargs = {}) triton_poi_fused_add_maximum_minimum_mul_sub_0 = async_compile.triton('triton_poi_fused_add_maximum_minimum_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_maximum_minimum_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_maximum_minimum_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tmp3 = 0.1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 - tmp1 tmp6 = tmp0 - tmp1 tmp7 = tmp6 * tmp3 tmp8 = tmp7 + tmp1 tmp9 = triton_helpers.minimum(tmp8, tmp0) tmp10 = triton_helpers.maximum(tmp5, tmp9) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, mul, x1, sub_1, mul_1, x2, min1, min2], Original ATen: [aten.add, aten.mul, aten.sub, aten.minimum, aten.maximum] stream0 = get_raw_stream(0) triton_poi_fused_add_maximum_minimum_mul_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PLU(nn.Module):
    """
    y = max(alpha*(x+c)−c, min(alpha*(x−c)+c, x))
    from PLU: The Piecewise Linear Unit Activation Function
    """

    def __init__(self, alpha=0.1, c=1):
        super().__init__()
        self.alpha = alpha
        self.c = c

    def forward(self, x):
        x1 = self.alpha * (x + self.c) - self.c
        x2 = self.alpha * (x - self.c) + self.c
        min1 = torch.min(x2, x)
        # despite its name, min2 is the outer max of the PLU formula
        min2 = torch.max(x1, min1)
        return min2

    def __repr__(self):
        s = '{name} ({alpha}, {c})'
        return s.format(name=self.__class__.__name__, **self.__dict__)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_maximum_minimum_mul_sub_0(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.1
    tmp4 = tmp2 * tmp3
    tmp5 = tmp4 - tmp1
    tmp6 = tmp0 - tmp1
    tmp7 = tmp6 * tmp3
    tmp8 = tmp7 + tmp1
    tmp9 = triton_helpers.minimum(tmp8, tmp0)
    tmp10 = triton_helpers.maximum(tmp5, tmp9)
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_maximum_minimum_mul_sub_0[grid(256)](arg0_1,
            buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class PLUNew(nn.Module):
    """
    y = max(alpha*(x+c)−c, min(alpha*(x−c)+c, x))
    from PLU: The Piecewise Linear Unit Activation Function
    """

    def __init__(self, alpha=0.1, c=1):
        super().__init__()
        self.alpha = alpha
        self.c = c

    def __repr__(self):
        s = '{name} ({alpha}, {c})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
IrisDinge/YoloV3_DOTA
PLU
false
5,350
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    y = max(alpha*(x+c)−c, min(alpha*(x−c)+c, x))
    from PLU: The Piecewise Linear Unit Activation Function
    """

    def __init__(self, alpha=0.1, c=1):
        super().__init__()
        self.alpha = alpha
        self.c = c

    def forward(self, x):
        x1 = self.alpha * (x + self.c) - self.c
        x2 = self.alpha * (x - self.c) + self.c
        min1 = torch.min(x2, x)
        # despite its name, min2 is the outer max of the PLU formula
        min2 = torch.max(x1, min1)
        return min2

    def __repr__(self):
        s = '{name} ({alpha}, {c})'
        return s.format(name=self.__class__.__name__, **self.__dict__)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
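Behaviorally, PLU is the identity inside [-c, c] and follows linear pieces of slope alpha outside that band. An illustrative sketch (mine, using the defaults alpha=0.1, c=1; the forward is elementwise, so a 1-D input is fine):

import torch

plu = PLU()  # defaults: alpha=0.1, c=1
x = torch.tensor([-3.0, -1.0, 0.0, 0.5, 1.0, 3.0])
y = plu(x)
# Inside [-c, c] the activation is the identity ...
assert torch.allclose(y[1:5], x[1:5])
# ... and outside it follows the slope-alpha linear pieces.
assert torch.isclose(y[0], torch.tensor(0.1 * (-3.0 + 1.0) - 1.0))
assert torch.isclose(y[5], torch.tensor(0.1 * (3.0 - 1.0) + 1.0))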
MultipleConst
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5l/c5lja42ucynhzaul7xv7gbc5hgzxyccxj7u735j75mxw7o725pey.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 255), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 
4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MultipleConst(nn.Module): def forward(self, data): return 255 * data def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 255.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MultipleConstNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
IvoryCandy/neural-style
MultipleConst
false
5,351
[ "Apache-2.0" ]
1
d9d73676479e36c1cbd6c9af36d857f80099504b
https://github.com/IvoryCandy/neural-style/tree/d9d73676479e36c1cbd6c9af36d857f80099504b
import torch import torch.nn as nn class Model(nn.Module): def forward(self, data): return 255 * data def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
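Usage is trivial; the module rescales [0, 1] float inputs to the [0, 255] range that VGG-style style-transfer networks typically expect. A one-line check (illustrative, not from the repo):

import torch

img = torch.rand(4, 4, 4, 4)
assert torch.equal(MultipleConst()(img), img * 255)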
Reorg
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/6r/c6raiiw2dpczfbllottnugfcinzrw6j32bxcyox7wsg7fmgcqro6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex % 4 
x4 = (xindex // 4) y0 = yindex % 2 y1 = (yindex // 2) % 2 y2 = (yindex // 4) x6 = xindex y5 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (2*x3) + (8*y1) + (16*x4) + (64*y2)), xmask & ymask) tl.store(out_ptr0 + (x6 + (16*y5)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 2, 1, 4, 4), (64, 32, 16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Reorg(nn.Module): """ This layer reorganizes a tensor according to a stride. The dimensions 2,3 will be sliced by the stride and then stacked in dimension 1. (input must have 4 dimensions) Args: stride (int): stride to divide the input tensor """ def __init__(self, stride=2): super(Reorg, self).__init__() if not isinstance(stride, int): raise TypeError(f'stride is not an int [{type(stride)}]') self.stride = stride self.darknet = True def __repr__(self): return ( f'{self.__class__.__name__} (stride={self.stride}, darknet_compatible_mode={self.darknet})' ) def forward(self, x): assert x.data.dim() == 4 B = x.data.size(0) C = x.data.size(1) H = x.data.size(2) W = x.data.size(3) if H % self.stride != 0: raise ValueError( f'Dimension mismatch: {H} is not divisible by {self.stride}') if W % self.stride != 0: raise ValueError( f'Dimension mismatch: {W} is not divisible by {self.stride}') if self.darknet: x = x.view(B, C // self.stride ** 2, H, self.stride, W, self.stride ).contiguous() x = x.permute(0, 3, 5, 1, 2, 4).contiguous() x = x.view(B, -1, H // self.stride, W // self.stride) else: ws, hs = self.stride, self.stride x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(3, 4 ).contiguous() x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(2, 3 ).contiguous() x = x.view(B, C, hs * ws, H // hs, W // ws).transpose(1, 2 ).contiguous() x = x.view(B, hs * ws * C, H // hs, W // ws) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x3 = xindex % 4 x4 = xindex // 4 y0 = yindex % 2 y1 = yindex // 2 % 2 y2 = yindex // 4 x6 = xindex y5 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x3 + 8 * y1 + 16 * x4 + 64 * y2), xmask & ymask) tl.store(out_ptr0 + (x6 + 16 * y5), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 2, 1, 4, 4), (64, 32, 16, 16, 4, 1 ), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), class ReorgNew(nn.Module): """ This layer reorganizes a tensor according to a stride. The dimensions 2,3 will be sliced by the stride and then stacked in dimension 1. (input must have 4 dimensions) Args: stride (int): stride to divide the input tensor """ def __init__(self, stride=2): super(ReorgNew, self).__init__() if not isinstance(stride, int): raise TypeError(f'stride is not an int [{type(stride)}]') self.stride = stride self.darknet = True def __repr__(self): return ( f'{self.__class__.__name__} (stride={self.stride}, darknet_compatible_mode={self.darknet})' ) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
IrisDinge/YoloV3_DOTA
Reorg
false
5,352
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch import torch.nn as nn class Model(nn.Module): """ This layer reorganizes a tensor according to a stride. The dimensions 2,3 will be sliced by the stride and then stacked in dimension 1. (input must have 4 dimensions) Args: stride (int): stride to divide the input tensor """ def __init__(self, stride=2): super().__init__() if not isinstance(stride, int): raise TypeError(f'stride is not an int [{type(stride)}]') self.stride = stride self.darknet = True def __repr__(self): return ( f'{self.__class__.__name__} (stride={self.stride}, darknet_compatible_mode={self.darknet})' ) def forward(self, x): assert x.data.dim() == 4 B = x.data.size(0) C = x.data.size(1) H = x.data.size(2) W = x.data.size(3) if H % self.stride != 0: raise ValueError( f'Dimension mismatch: {H} is not divisible by {self.stride}') if W % self.stride != 0: raise ValueError( f'Dimension mismatch: {W} is not divisible by {self.stride}') if self.darknet: x = x.view(B, C // self.stride ** 2, H, self.stride, W, self.stride ).contiguous() x = x.permute(0, 3, 5, 1, 2, 4).contiguous() x = x.view(B, -1, H // self.stride, W // self.stride) else: ws, hs = self.stride, self.stride x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(3, 4 ).contiguous() x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(2, 3 ).contiguous() x = x.view(B, C, hs * ws, H // hs, W // ws).transpose(1, 2 ).contiguous() x = x.view(B, hs * ws * C, H // hs, W // ws) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
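A shape-level sketch of the darknet-compatible path (my own, not from the repo): with stride 2, each 2x2 spatial block moves into the channel dimension, so (B, C, H, W) becomes (B, stride**2 * C, H/stride, W/stride), matching the (4, 16, 2, 2) view returned by the compiled call above.

import torch

x = torch.arange(4 * 4 * 4 * 4, dtype=torch.float32).view(4, 4, 4, 4)
y = Reorg(stride=2)(x)
# Channels grow by stride**2 while both spatial dims shrink by the stride.
assert y.shape == (4, 16, 2, 2)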
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/56/c56bdebalzrulrb6n2p5lgqb24oyosta4crzvzs4bckxm6irliug.py # Topologically Sorted Source Nodes: [norm, l2_norm, x_norm, y, y_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # l2_norm => add # norm => pow_1, pow_2, sum_1 # x_norm => div # y => mul # y_1 => add_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + (x3), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, l2_norm, x_norm, y, y_1], Original ATen: [aten.linalg_vector_norm, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class L2Norm(nn.Module): def __init__(self, nchannels, bias=True): super().__init__() self.scale = Scale(nchannels, bias=bias) self.nchannels = nchannels self.eps = 1e-06 def forward(self, x): l2_norm = x.norm(2, dim=1, keepdim=True) + self.eps x_norm = x.div(l2_norm) y = self.scale(x_norm) return y def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x3, tmp19, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_mul_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class L2NormNew(nn.Module): def __init__(self, nchannels, bias=True): super().__init__() self.scale = Scale(nchannels, bias=bias) self.nchannels = nchannels self.eps = 1e-06 def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def forward(self, input_0): primals_2 = self.scale.weight primals_3 = self.scale.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
IrisDinge/YoloV3_DOTA
L2Norm
false
5,353
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class Model(nn.Module): def __init__(self, nchannels, bias=True): super().__init__() self.scale = Scale(nchannels, bias=bias) self.nchannels = nchannels self.eps = 1e-06 def forward(self, x): l2_norm = x.norm(2, dim=1, keepdim=True) + self.eps x_norm = x.div(l2_norm) y = self.scale(x_norm) return y def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
Block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/f2/cf23blhmrqff3yhtwgcpt4rhmfkv3i25ry4dtkijev5qamd7hqxq.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3r/c3rfy3ljjc2bfodnr5gm65jr7ew6v6kno6w6jzahlupuqxbpvfkw.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/aw/cawvwx3nv7ipnpnf2hcgwz5usu7vsw5yynj5ofrunhktjwqff5vq.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/p5/cp5wuljbdcz2dl2xvl4imkn5wmtmrnbb7mnld5glztiqavldlheh.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vv/cvvhis67uzj3m3ebbd4sgghaemqhihabasphltk5wytqdd6fe74t.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + 
(48*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5y/c5yhyv7emyc7i2ozpvns6tsiqcvdzktqqpohy4sedfe7aihkojch.py # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => var_mean_1 # x_1 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wk/cwkvd3urdgah2mwiynddchgrzzinh7dhvvfxtkikqtjcxphriloz.py # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2 # x_1 => add_2 # x_3 => add_3 # Graph fragment: # %add_2 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {}) triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/b4/cb43jhxvcrefkhdp7ixdoh6nmvez5h55vhlzkxtasuovu5ru7pe5.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_5 => add_6, erf, mul_5, mul_6, mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {}) triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pu/cpuql3oz4hmaygynopg7lq7xhfiv7hr7pr4vyzhfpmw34jymdp7q.py # Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add_2 # x_3 => add_3 # x_9 => add_7 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %primals_6), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_15), kwargs = {}) triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16, ), (1, )) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), 
torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf3, buf4, 16, 4, grid=grid(16, 4), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf3, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf3, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf12, primals_6, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.gelu] 
triton_poi_fused_gelu_10.run(buf16, buf17, 256, grid=grid(256), stream=stream0) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse # Topologically Sorted Source Nodes: [x_1, x_3, x_9], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf19, primals_3, buf12, primals_6, primals_12, 64, grid=grid(64), stream=stream0) del primals_12 return (buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from functools import partial class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), vis=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from functools import partial assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = 
tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (16, 4), (4, 1)) assert_size_stride(primals_10, (16,), (1,)) assert_size_stride(primals_11, (4, 16), (16, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(16, 4)](buf3, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf3, buf9, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf3 buf10 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, 
num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf12, primals_6, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf12, primals_6, buf13, buf14, primals_7, primals_8, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_8 buf16 = reinterpret_tensor(buf7, (16, 16), (16, 1), 0) del buf7 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_10[grid(256)](buf16, buf17, 256, XBLOCK=128, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18) buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0) del buf18 triton_poi_fused_add_11[grid(64)](buf19, primals_3, buf12, primals_6, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return buf19, primals_3, primals_6, primals_7, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0 ), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0 ), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0 ), primals_11, primals_9, primals_5, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_4 class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class BlockNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), vis=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, 
attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attn.qkv.weight primals_5 = self.attn.proj.weight primals_6 = self.attn.proj.bias primals_7 = self.norm2.weight primals_8 = self.norm2.bias primals_9 = self.mlp.fc1.weight primals_10 = self.mlp.fc1.bias primals_11 = self.mlp.fc2.weight primals_12 = self.mlp.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
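The fused `triton_poi_fused_gelu_10` kernel above hard-codes the constants 0.5 and 0.7071067811865476; the latter is 1/sqrt(2), so the kernel evaluates the exact (erf-based) GELU, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). A minimal check against PyTorch's eager op (a sketch; runs fine on CPU):

import torch

# 0.7071067811865476 == 1/sqrt(2), so the fused kernel's formula is
# exact GELU: 0.5 * x * (1 + erf(x / sqrt(2))).
x = torch.randn(1000)
ref = torch.nn.functional.gelu(x)  # exact (erf) GELU by default
manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
print(torch.allclose(ref, manual))  # expected: True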
Huzhen757/Conformer
Block
false
5354
[ "Apache-2.0" ]
1
4f7a80cec28b9ced8c0225a85a32997f7cd2b93c
https://github.com/Huzhen757/Conformer/tree/4f7a80cec28b9ced8c0225a85a32997f7cd2b93c
import torch import torch.nn as nn from functools import partial class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Model(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), vis=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
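Both the compiled `BlockNew` above and the eager `Model` in this record reference `DropPath` without defining or importing it, so constructing either with `drop_path > 0.0` raises a `NameError` (the default `0.0` falls through to `nn.Identity()`). A minimal sketch of the missing class, assuming the standard timm-style per-sample stochastic depth:

import torch
import torch.nn as nn

class DropPath(nn.Module):
    """Per-sample stochastic depth: zero an entire residual branch with
    probability drop_prob and rescale survivors by 1/keep_prob (assumed
    timm-style behavior; not part of the original record)."""

    def __init__(self, drop_prob=0.0):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        if self.drop_prob == 0.0 or not self.training:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli draw per sample, broadcast over the remaining dims.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = x.new_empty(shape).bernoulli_(keep_prob)
        return x * mask / keep_prob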
ScaleReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fx/cfxspn4657zehhj3fq3qk4ovlv5s3lz5r7b4oicuczfahhe6wnwy.py # Topologically Sorted Source Nodes: [y, y_1, y_2], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # y => mul # y_1 => add # y_2 => relu # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_mul_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_mul_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [y, y_1, y_2], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_relu_threshold_backward_0.run(primals_2, primals_1, primals_3, buf0, buf1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf0, primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class ScaleReLU(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(nchannels) self.relu = nn.ReLU(inplace=True) self.nchannels = nchannels def forward(self, x): x1 = self.scale(x) y = self.relu(x1) return y def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_mul_relu_threshold_backward_0[grid(256)](primals_2 , primals_1, primals_3, buf0, buf1, 256, XBLOCK=256, num_warps= 4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2, buf1 class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class ScaleReLUNew(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(nchannels) self.relu = nn.ReLU(inplace=True) self.nchannels = nchannels def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def forward(self, input_0): primals_1 = self.scale.weight primals_3 = self.scale.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
IrisDinge/YoloV3_DOTA
ScaleReLU
false
5355
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class Model(nn.Module): def __init__(self, nchannels): super().__init__() self.scale = Scale(nchannels) self.relu = nn.ReLU(inplace=True) self.nchannels = nchannels def forward(self, x): x1 = self.scale(x) y = self.relu(x1) return y def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
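To see that the compiled wrapper matches the eager module, the two can be run side by side on shared parameters; a minimal sketch, assuming both the eager `Model` and the compiled `ScaleReLUNew` above are in scope and a CUDA device is available (the generated `call()` path is CUDA-only):

import torch

torch.manual_seed(0)
eager = Model(4).cuda()
compiled = ScaleReLUNew(4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the same parameters

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), compiled(x)))  # expected: True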
GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/be/cbej2f3myglhqo2dienhyo4fp7tbscq32k7imbgc2psgl6gaxxhi.py # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu] # Source node to ATen node mapping: # add => add # x => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), 
xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm] extern_kernels.mm(primals_2, buf0, out=buf1) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_0.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.addmm(primals_6, primals_2, buf3, alpha=1, beta=1, out=buf4) del buf3 del primals_6 return (buf4, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import math import torch from torch import nn from torch.nn import functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 Implementation taken from https://github.com/tkipf/pygcn """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input1, adj): input1 = input1 adj = adj self.weight = Parameter(self.weight) support = torch.mm(input1, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = x adj = adj x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import math from torch import nn from torch.nn.parameter import Parameter from torch.nn.modules.module import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_3, out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_2, buf3, alpha=1, beta=1, out=buf4) del buf3 del primals_6 return buf4, buf2, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0) class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 Implementation taken from https://github.com/tkipf/pygcn """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input1, adj): input1 = input1 adj = adj self.weight = Parameter(self.weight) support = torch.mm(input1, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) 
self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
IsmailKent/updown-baseline
GCN
false
5356
[ "MIT" ]
1
17a09a48e4f30a4a3edc7924f982eb129c583b41
https://github.com/IsmailKent/updown-baseline/tree/17a09a48e4f30a4a3edc7924f982eb129c583b41
from torch.nn import Module import math import torch from torch import nn from torch.nn import functional as F from torch.nn.parameter import Parameter from torch.nn.modules.module import Module class GraphConvolution(Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 Implementation taken from https://github.com/tkipf/pygcn """ def __init__(self, in_features, out_features, bias=True): super().__init__() self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if bias: self.bias = Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input1, adj): input1 = input1 adj = adj self.weight = Parameter(self.weight) support = torch.mm(input1, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class Model(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super().__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): x = x adj = adj x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4, 4, 0.5]
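As a quick smoke test, the record's own helpers can drive the eager model; a minimal sketch, assuming the `Model`, `get_inputs`, and `get_init_inputs` defined above are in scope (the eager path runs fine on CPU):

import torch

torch.manual_seed(0)
model = Model(*get_init_inputs())  # nfeat=4, nhid=4, nclass=4, dropout=0.5
model.eval()                       # disable dropout for a deterministic pass
out = model(*get_inputs())         # x: (4, 4), adj: (4, 4)
print(out.shape)                   # torch.Size([4, 4])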
PPReLU
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yd/cydcn77drgl4w2dldwwhrwwlp2e6rgz5jdramqtogxia2rrvxahe.py # Topologically Sorted Source Nodes: [y, y_1, y_2], Original ATen: [aten.mul, aten.maximum] # Source node to ATen node mapping: # y => mul # y_1 => mul_1 # y_2 => maximum # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_3), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul, %mul_1), kwargs = {}) triton_poi_fused_maximum_mul_0 = async_compile.triton('triton_poi_fused_maximum_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_maximum_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel 
= 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp0 * tmp3 tmp5 = triton_helpers.maximum(tmp2, tmp4) tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y, y_1, y_2], Original ATen: [aten.mul, aten.maximum] stream0 = get_raw_stream(0) triton_poi_fused_maximum_mul_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) return (buf0, primals_1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class PPReLU(nn.Module): def __init__(self, nchannels): super().__init__() self.scale1 = Scale(nchannels, bias=False, init_scale=1.0) self.scale2 = Scale(nchannels, bias=False, init_scale=0.1) self.nchannels = nchannels def forward(self, x): x1 = self.scale1(x) x2 = self.scale2(x) y = torch.max(x1, x2) return y def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_maximum_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp0 * tmp3 tmp5 = triton_helpers.maximum(tmp2, tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_maximum_mul_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf0, primals_1, primals_2, primals_3 class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class PPReLUNew(nn.Module): def __init__(self, nchannels): super().__init__() self.scale1 = Scale(nchannels, bias=False, init_scale=1.0) self.scale2 = Scale(nchannels, bias=False, init_scale=0.1) self.nchannels = nchannels def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def forward(self, input_0): primals_1 = self.scale1.weight primals_3 = self.scale2.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
IrisDinge/YoloV3_DOTA
PPReLU
false
5357
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) class Model(nn.Module): def __init__(self, nchannels): super().__init__() self.scale1 = Scale(nchannels, bias=False, init_scale=1.0) self.scale2 = Scale(nchannels, bias=False, init_scale=0.1) self.nchannels = nchannels def forward(self, x): x1 = self.scale1(x) x2 = self.scale2(x) y = torch.max(x1, x2) return y def __repr__(self): s = '{name} ({nchannels})' return s.format(name=self.__class__.__name__, **self.__dict__) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
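Because the two scales are initialized to 1.0 and 0.1, the elementwise maximum max(1.0*x, 0.1*x) coincides with a LeakyReLU of negative slope 0.1 in every channel: for x >= 0 the maximum is x, and for x < 0 it is 0.1*x. A minimal numerical check, assuming the `Model` class above is in scope:

import torch
import torch.nn as nn

torch.manual_seed(0)
m = Model(4)                   # PPReLU with default init scales 1.0 and 0.1
x = torch.randn(4, 4, 4, 4)    # randn so negative inputs are exercised
print(torch.allclose(m(x), nn.LeakyReLU(0.1)(x)))  # expected: True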
Scale
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/fb/cfbeyr3lhfnhw7ca27iubsdbjxh3gnvnzbr2oxoqiwodjw5uc7dc.py # Topologically Sorted Source Nodes: [y, y_1], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # y => mul # y_1 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y, y_1], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_3 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Scale(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nchannels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_3 return buf0, primals_2 class ScaleNew(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
IrisDinge/YoloV3_DOTA
Scale
false
5,358
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, nchannels, bias=True, init_scale=1.0): super().__init__() self.nchannels = nchannels self.weight = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) if bias: self.bias = nn.Parameter(torch.Tensor(1, nchannels, 1, 1)) else: self.register_parameter('bias', None) self.reset_parameters(init_scale) def reset_parameters(self, init_scale=1.0): self.weight.data.fill_(init_scale) if self.bias is not None: self.bias.data.fill_(0.0) def forward(self, x): y = x * self.weight if self.bias is not None: y += self.bias return y def __repr__(self): s = '{} ({}, {})' return s.format(self.__class__.__name__, self.nchannels, self.bias is not None) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
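For reference, a minimal eager-mode sketch of what triton_poi_fused_add_mul_0 fuses: a per-channel broadcast multiply-add. Under the record's default initialization (init_scale=1.0, zero bias) it reduces to the identity; shapes follow get_inputs()/get_init_inputs().

import torch

# Parameters as Scale.reset_parameters leaves them: weight=1, bias=0.
weight = torch.ones(1, 4, 1, 1)
bias = torch.zeros(1, 4, 1, 1)
x = torch.rand(4, 4, 4, 4)

y = x * weight + bias  # the per-element mul+add the Triton kernel fuses
assert torch.allclose(y, x)  # identity under the default init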
PaddedMaxPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/eb/ceb5n3ngnbtecolkfq76e577yj7jgiajgbg2es3d3ftje6u4ouia.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + (x0), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class PaddedMaxPool2d(nn.Module):
    """ Maxpool layer with a replicating padding.

    Args:
        kernel_size (int or tuple): Kernel size for maxpooling
        stride (int or tuple, optional): The stride of the window; Default ``kernel_size``
        padding (tuple, optional): (left, right, top, bottom) padding; Default ``(0, 0, 0, 0)``
        dilation (int or tuple, optional): A parameter that controls the stride of elements in the window
    """

    def __init__(self, kernel_size, stride=None, padding=(0, 0, 0, 0),
        dilation=1):
        super(PaddedMaxPool2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation

    def __repr__(self):
        return (
            f'{self.__class__.__name__} (kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})'
            )

    def forward(self, x):
        x = F.max_pool2d(F.pad(x, self.padding, mode='replicate'), self.
            kernel_size, self.stride, 0, self.dilation)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x0, tmp30, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class PaddedMaxPool2dNew(nn.Module): """ Maxpool layer with a replicating padding. 
    Args:
        kernel_size (int or tuple): Kernel size for maxpooling
        stride (int or tuple, optional): The stride of the window; Default ``kernel_size``
        padding (tuple, optional): (left, right, top, bottom) padding; Default ``(0, 0, 0, 0)``
        dilation (int or tuple, optional): A parameter that controls the stride of elements in the window
    """

    def __init__(self, kernel_size, stride=None, padding=(0, 0, 0, 0),
        dilation=1):
        super(PaddedMaxPool2dNew, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation

    def __repr__(self):
        return (
            f'{self.__class__.__name__} (kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})'
            )

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
IrisDinge/YoloV3_DOTA
PaddedMaxPool2d
false
5,359
[ "MIT" ]
1
cdfe6375a2323e9ee162e50a46478d8a66529e6c
https://github.com/IrisDinge/YoloV3_DOTA/tree/cdfe6375a2323e9ee162e50a46478d8a66529e6c
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """ Maxpool layer with a replicating padding.

    Args:
        kernel_size (int or tuple): Kernel size for maxpooling
        stride (int or tuple, optional): The stride of the window; Default ``kernel_size``
        padding (tuple, optional): (left, right, top, bottom) padding; Default ``(0, 0, 0, 0)``
        dilation (int or tuple, optional): A parameter that controls the stride of elements in the window
    """

    def __init__(self, kernel_size, stride=None, padding=(0, 0, 0, 0),
        dilation=1):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.dilation = dilation

    def __repr__(self):
        return (
            f'{self.__class__.__name__} (kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})'
            )

    def forward(self, x):
        x = F.max_pool2d(F.pad(x, self.padding, mode='replicate'), self.
            kernel_size, self.stride, 0, self.dilation)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
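With kernel_size=4 on a 4x4 input and all-zero replicate padding (the record's configuration), the compiled kernel's chain of sixteen triton_helpers.maximum calls is simply a full-plane max; a quick eager check:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# Same ops as PaddedMaxPool2d.forward with the record's arguments.
pooled = F.max_pool2d(F.pad(x, (0, 0, 0, 0), mode='replicate'), 4)
# The kernel's 16 loads folded through maximum() equal a plane-wide max.
assert torch.allclose(pooled, x.amax(dim=(2, 3), keepdim=True))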
GramMatrix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/em/ceme7dtfw5rmqppp5ffl7f3bkktbsz45ls4n5npdu7izsivsrkbh.py # Topologically Sorted Source Nodes: [gram], Original ATen: [aten.div] # Source node to ATen node mapping: # gram => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 64), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.015625 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 
4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) del arg0_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [gram], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf1, 64, grid=grid(64), stream=stream0) return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GramMatrix(nn.Module): def forward(self, y): b, ch, h, w = y.size() features = y.view(b, ch, w * h) features_t = features.transpose(1, 2) gram = features.bmm(features_t) / (ch * h * w) return gram def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.015625 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(arg0_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) del arg0_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf1, class GramMatrixNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
IvoryCandy/neural-style
GramMatrix
false
5,360
[ "Apache-2.0" ]
1
d9d73676479e36c1cbd6c9af36d857f80099504b
https://github.com/IvoryCandy/neural-style/tree/d9d73676479e36c1cbd6c9af36d857f80099504b
import torch import torch.nn as nn class Model(nn.Module): def forward(self, y): b, ch, h, w = y.size() features = y.view(b, ch, w * h) features_t = features.transpose(1, 2) gram = features.bmm(features_t) / (ch * h * w) return gram def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
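The constant 0.015625 that triton_poi_fused_div_0 multiplies by is 1/(ch*h*w) = 1/64 for the (4, 4, 4, 4) input; Inductor folds the division into a reciprocal multiply applied after the batched matmul. A short eager restatement:

import torch

y = torch.rand(4, 4, 4, 4)
b, ch, h, w = y.shape
features = y.view(b, ch, h * w)            # (4, 4, 16), as in the bmm call
gram = features.bmm(features.transpose(1, 2)) / (ch * h * w)
assert gram.shape == (4, 4, 4)
assert 1.0 / (ch * h * w) == 0.015625      # the folded reciprocal constant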
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/dz/cdzlfn35yag6jtz5ni2o3wxs6zz4qa5ljfjpsrkhqfmlbh3qhae3.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, out], Original ATen: [aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # norm => pow_2 # out => div # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_div_pow_sum_0 = async_compile.triton('triton_poi_fused_div_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, out], Original ATen: [aten.pow, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_pow_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Normalize(nn.Module): """normalization layer""" def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): """normalization layer""" def __init__(self, power=2): super(NormalizeNew, self).__init__() self.power = power def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JJuOn/Few-shot_Class_Incremental_Learning
Normalize
false
5,362
[ "MIT" ]
1
a2178051a6fefcd73b60f5e4236116bf828a801c
https://github.com/JJuOn/Few-shot_Class_Incremental_Learning/tree/a2178051a6fefcd73b60f5e4236116bf828a801c
import torch from torch import nn class Model(nn.Module): """normalization layer""" def __init__(self, power=2): super().__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
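The fused pow-sum-sqrt-divide is an L2 normalization over dim 1; for power=2 it matches F.normalize up to the eps clamp, which is negligible on random inputs. A quick eager check:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
norm = x.pow(2).sum(1, keepdim=True).pow(0.5)   # sqrt of the 4-term square sum
out = x / norm
assert torch.allclose(out, F.normalize(x, p=2, dim=1), atol=1e-6)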
Readout
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py # Topologically Sorted Source Nodes: [combined_rep], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined_rep => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py # Topologically Sorted Source Nodes: [hidden_rep], Original ATen: [aten.relu] # Source node to ATen node mapping: # hidden_rep => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/t2/ct2gakqskycxgwsn5noyoyvzwoohje4fjsbktm2qhfjncrcx3tdi.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.mean] # Source node to ATen node mapping: # logits => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%addmm_2, [0]), kwargs = {}) triton_poi_fused_mean_2 = async_compile.triton('triton_poi_fused_mean_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [combined_rep], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [hidden_rep], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [graph_rep], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf4, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = 
empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.mean] triton_poi_fused_mean_2.run(buf5, buf6, 4, grid=grid(4), stream=stream0) del buf5 return (buf6, buf0, buf2, buf4, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class Readout(nn.Module): """ This module learns a single graph level representation for a molecule given GraphSAGE generated embeddings """ def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim, num_cats): super(Readout, self).__init__() self.attr_dim = attr_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.num_cats = num_cats self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) self.output = nn.Linear(output_dim, num_cats) self.act = nn.ReLU() def forward(self, node_features, node_embeddings): combined_rep = torch.cat((node_features, node_embeddings), dim=1) hidden_rep = self.act(self.layer1(combined_rep)) graph_rep = self.act(self.layer2(hidden_rep)) logits = torch.mean(self.output(graph_rep), dim=0) return logits def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'attr_dim': 4, 'embedding_dim': 4, 'hidden_dim': 4, 'output_dim': 4, 'num_cats': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mean_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8 ), 0), out=buf1) del primals_3 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4 ), 0), out=buf3) buf4 = buf3 del buf3 triton_poi_fused_relu_1[grid(16)](buf4, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) 
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
        del primals_8
        buf6 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mean_2[grid(4)](buf5, buf6, 4, XBLOCK=4, num_warps
            =1, num_stages=1)
        del buf5
    return buf6, buf0, buf2, buf4, primals_7, primals_5


class ReadoutNew(nn.Module):
    """
    This module learns a single graph level representation for a molecule given GraphSAGE generated embeddings
    """

    def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim,
        num_cats):
        super(ReadoutNew, self).__init__()
        self.attr_dim = attr_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_cats = num_cats
        self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, output_dim)
        self.output = nn.Linear(output_dim, num_cats)
        self.act = nn.ReLU()

    def forward(self, input_0, input_1):
        # Map to the positions call() expects: primals_1/primals_2 are the
        # two (4, 4) inputs consumed by the cat kernel, primals_3..primals_8
        # are the weights and biases of the three linear layers.
        primals_3 = self.layer1.weight
        primals_4 = self.layer1.bias
        primals_5 = self.layer2.weight
        primals_6 = self.layer2.bias
        primals_7 = self.output.weight
        primals_8 = self.output.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
JW9MsjwjnpdRLFw/TSFL
Readout
false
5,363
[ "MIT" ]
1
ccca391348fde270c9d43149a3397ac3cad4c6e0
https://github.com/JW9MsjwjnpdRLFw/TSFL/tree/ccca391348fde270c9d43149a3397ac3cad4c6e0
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """ This module learns a single graph level representation for a molecule given GraphSAGE generated embeddings """ def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim, num_cats): super().__init__() self.attr_dim = attr_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.num_cats = num_cats self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) self.output = nn.Linear(output_dim, num_cats) self.act = nn.ReLU() def forward(self, node_features, node_embeddings): combined_rep = torch.cat((node_features, node_embeddings), dim=1) hidden_rep = self.act(self.layer1(combined_rep)) graph_rep = self.act(self.layer2(hidden_rep)) logits = torch.mean(self.output(graph_rep), dim=0) return logits def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'attr_dim': 4, 'embedding_dim': 4, 'hidden_dim': 4, 'output_dim': 4, 'num_cats': 4}]
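An eager-mode shape walk of Readout mirroring the intermediate buffers in call() (buf0, buf2, buf4, buf6); layer sizes follow the record's get_init_inputs():

import torch
import torch.nn as nn

layer1, layer2, output = nn.Linear(8, 4), nn.Linear(4, 4), nn.Linear(4, 4)
feats, embeds = torch.rand(4, 4), torch.rand(4, 4)

combined = torch.cat((feats, embeds), dim=1)  # buf0: (4, 8)
hidden = torch.relu(layer1(combined))         # buf2: (4, 4), fused bias+ReLU
graph = torch.relu(layer2(hidden))            # buf4: (4, 4)
logits = output(graph).mean(dim=0)            # buf6: (4,), four-term mean
assert logits.shape == (4,)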
GCN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3w/c3w6ed26vbtzgzigkkyuiuwphm777x7aoogcxkvoly3ia7qifnzg.py # Topologically Sorted Source Nodes: [embeddings_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # embeddings_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/hn/chnw3utjml5th2ajz6bi2grgvq2fx734zxlojqpjfuufsiazjieq.py # Topologically Sorted Source Nodes: [embeddings_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # embeddings_4 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [embeddings], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [embeddings_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf2, 16, grid=grid(16), stream=stream0) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [support_1], Original ATen: 
[aten.mm] extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [embeddings_3], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) del buf3 buf5 = buf4; del buf4 # reuse buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [embeddings_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf5, buf6, 16, grid=grid(16), stream=stream0) return (buf5, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.utils.data


class GCN(nn.Module):
    """
    Graph Convolutional Network based on https://arxiv.org/abs/1609.02907
    """

    def __init__(self, feat_dim, hidden_dim1, hidden_dim2, dropout,
        is_sparse=False):
        """Dense version of GCN."""
        super(GCN, self).__init__()
        self.W1 = nn.Parameter(torch.FloatTensor(feat_dim, hidden_dim1))
        self.W2 = nn.Parameter(torch.FloatTensor(hidden_dim1, hidden_dim2))
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout)
        nn.init.xavier_uniform_(self.W1.data)
        nn.init.xavier_uniform_(self.W2.data)
        self.is_sparse = is_sparse

    def forward(self, x, adj):
        support = torch.mm(x, self.W1)
        embeddings = torch.sparse.mm(adj, support
            ) if self.is_sparse else torch.mm(adj, support)
        embeddings = self.relu(embeddings)
        embeddings = self.dropout(embeddings)
        support = torch.mm(embeddings, self.W2)
        embeddings = torch.sparse.mm(adj, support
            ) if self.is_sparse else torch.mm(adj, support)
        embeddings = self.relu(embeddings)
        return embeddings


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'feat_dim': 4, 'hidden_dim1': 4, 'hidden_dim2': 4,
        'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) del buf3 buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf5, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GCNNew(nn.Module): """ Graph Convolutional Network based on https://arxiv.org/abs/1609.02907 """ def __init__(self, feat_dim, hidden_dim1, hidden_dim2, dropout, is_sparse=False): """Dense version of GAT.""" super(GCNNew, self).__init__() self.W1 = nn.Parameter(torch.FloatTensor(feat_dim, hidden_dim1)) self.W2 = nn.Parameter(torch.FloatTensor(hidden_dim1, hidden_dim2)) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropout) nn.init.xavier_uniform_(self.W1.data) nn.init.xavier_uniform_(self.W2.data) self.is_sparse = is_sparse def forward(self, input_0, input_1): primals_1 = self.W1 primals_4 = self.W2 primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
JW9MsjwjnpdRLFw/TSFL
GCN
false
5364
[ "MIT" ]
1
ccca391348fde270c9d43149a3397ac3cad4c6e0
https://github.com/JW9MsjwjnpdRLFw/TSFL/tree/ccca391348fde270c9d43149a3397ac3cad4c6e0
import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): """ Graph Convolutional Network based on https://arxiv.org/abs/1609.02907 """ def __init__(self, feat_dim, hidden_dim1, hidden_dim2, dropout, is_sparse=False): """Dense version of GAT.""" super().__init__() self.W1 = nn.Parameter(torch.FloatTensor(feat_dim, hidden_dim1)) self.W2 = nn.Parameter(torch.FloatTensor(hidden_dim1, hidden_dim2)) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropout) nn.init.xavier_uniform_(self.W1.data) nn.init.xavier_uniform_(self.W2.data) self.is_sparse = is_sparse def forward(self, x, adj): support = torch.mm(x, self.W1) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) embeddings = self.dropout(embeddings) support = torch.mm(embeddings, self.W2) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) return embeddings def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'feat_dim': 4, 'hidden_dim1': 4, 'hidden_dim2': 4, 'dropout': 0.5}]
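A minimal parity sketch for this row (not part of the dataset record): it assumes a CUDA device and that GCN, GCNNew, and get_inputs from the code fields above are in scope; the helper name check_gcn_record is hypothetical. The eager module is run in eval mode because the traced graph was captured without dropout.

import torch

def check_gcn_record():
    torch.manual_seed(0)
    eager = GCN(feat_dim=4, hidden_dim1=4, hidden_dim2=4, dropout=0.5).cuda()
    compiled = GCNNew(feat_dim=4, hidden_dim1=4, hidden_dim2=4, dropout=0.5).cuda()
    compiled.load_state_dict(eager.state_dict())  # share W1/W2 so outputs are comparable
    x, adj = (t.cuda() for t in get_inputs())
    eager.eval()  # the compiled call() contains no dropout op
    torch.testing.assert_close(compiled(x, adj), eager(x, adj))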
Decoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5i/c5i7cl7fzfqfwzp6copre24vliqiu7jwlux362n6zv7uvxddgs3p.py # Topologically Sorted Source Nodes: [scores, log_probs, mul, sum_1, log_likelihood], Original ATen: [aten.add, aten._log_softmax, aten.mul, aten.sum, aten.mean] # Source node to ATen node mapping: # log_likelihood => mean # log_probs => amax, exp, log, sub, sub_1, sum_1 # mul => mul # scores => add # sum_1 => sum_2 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {}) triton_per_fused__log_softmax_add_mean_mul_sum_0 = async_compile.triton('triton_per_fused__log_softmax_add_mean_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_add_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_add_mean_mul_sum_0(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = (rindex // 16) r4 = rindex % 16 r0 = rindex % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r4 + (64*r2)), None) tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r4 + (64*r2)), None) tmp6 = tl.load(in_ptr0 + (32 + r4 + (64*r2)), None) tmp9 = tl.load(in_ptr0 + (48 + r4 + (64*r2)), None) tmp25 = tl.load(in_ptr2 + (r4 + (64*r2)), None) tmp28 = tl.load(in_ptr2 + (16 + r4 + (64*r2)), None) tmp32 = tl.load(in_ptr2 + (32 + r4 + (64*r2)), None) tmp36 = tl.load(in_ptr2 + (48 + r4 + (64*r2)), None) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tl_math.log(tmp22) tmp24 = tmp12 - tmp23 tmp26 = tmp24 * tmp25 tmp27 = tmp14 - tmp23 tmp29 = tmp27 * tmp28 tmp30 = tmp26 + tmp29 tmp31 = tmp17 - tmp23 tmp33 = tmp31 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = tmp20 - tmp23 tmp37 = tmp35 * tmp36 tmp38 = tmp34 + tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp43, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # 
Topologically Sorted Source Nodes: [scores, log_probs, mul, sum_1, log_likelihood], Original ATen: [aten.add, aten._log_softmax, aten.mul, aten.sum, aten.mean] stream0 = get_raw_stream(0) triton_per_fused__log_softmax_add_mean_mul_sum_0.run(buf5, buf0, primals_3, primals_4, 1, 64, grid=grid(1), stream=stream0) return (buf5, primals_3, primals_4, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Decoder(nn.Module): def __init__(self, dim_encoding, vocab_size): super().__init__() self.E = nn.Embedding(dim_encoding, vocab_size) self.b = nn.Parameter(torch.zeros(1, vocab_size)) def forward(self, Z, targets): scores = Z @ self.E.weight + self.b log_probs = scores.log_softmax(dim=1) log_likelihood = (log_probs * targets).sum(1).mean() return log_likelihood def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim_encoding': 4, 'vocab_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__log_softmax_add_mean_mul_sum_0(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex // 16 r4 = rindex % 16 r0 = rindex % 4 tmp0 = tl.load(in_ptr0 + (r4 + 64 * r2), None) tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r4 + 64 * r2), None) tmp6 = tl.load(in_ptr0 + (32 + r4 + 64 * r2), None) tmp9 = tl.load(in_ptr0 + (48 + r4 + 64 * r2), None) tmp25 = tl.load(in_ptr2 + (r4 + 64 * r2), None) tmp28 = tl.load(in_ptr2 + (16 + r4 + 64 * r2), None) tmp32 = tl.load(in_ptr2 + (32 + r4 + 64 * r2), None) tmp36 = tl.load(in_ptr2 + (48 + r4 + 64 * r2), None) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tl_math.log(tmp22) tmp24 = tmp12 - tmp23 tmp26 = tmp24 * tmp25 tmp27 = tmp14 - tmp23 tmp29 = tmp27 * tmp28 tmp30 = tmp26 + tmp29 tmp31 = tmp17 - tmp23 tmp33 = tmp31 * tmp32 tmp34 = tmp30 + tmp33 tmp35 = tmp20 - tmp23 tmp37 = tmp35 * tmp36 tmp38 = tmp34 + tmp37 tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41 = tl.sum(tmp39, 1)[:, None] tmp42 = 64.0 tmp43 = tmp41 / tmp42 tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp43, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4 del buf4 get_raw_stream(0) triton_per_fused__log_softmax_add_mean_mul_sum_0[grid(1)](buf5, buf0, primals_3, primals_4, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) return buf5, primals_3, primals_4, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class DecoderNew(nn.Module): def __init__(self, dim_encoding, vocab_size): super().__init__() self.E = nn.Embedding(dim_encoding, vocab_size) self.b = nn.Parameter(torch.zeros(1, vocab_size)) def forward(self, input_0, input_1): primals_3 = self.b primals_1 = self.E.weight primals_2 = input_0 primals_4 = input_1 output 
= call([primals_1, primals_2, primals_3, primals_4]) return output[0]
J-zin/Semantic-Hashing-Models
Decoder
false
5365
[ "MIT" ]
1
2e4a2348bc8399a9739016e1a1a5e25a77babbbd
https://github.com/J-zin/Semantic-Hashing-Models/tree/2e4a2348bc8399a9739016e1a1a5e25a77babbbd
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim_encoding, vocab_size): super().__init__() self.E = nn.Embedding(dim_encoding, vocab_size) self.b = nn.Parameter(torch.zeros(1, vocab_size)) def forward(self, Z, targets): scores = Z @ self.E.weight + self.b log_probs = scores.log_softmax(dim=1) log_likelihood = (log_probs * targets).sum(1).mean() return log_likelihood def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
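A similar sketch for this row (again an assumption-laden example, not part of the record; it presumes a CUDA device and the Decoder, DecoderNew, and get_inputs definitions above). The single persistent-reduction kernel fuses the bias add, the log_softmax over dim 1, the elementwise multiply with targets, the channel sum, and the final mean into one pass, so the scalar it returns should match the eager loss:

import torch

def check_decoder_record():
    torch.manual_seed(0)
    eager = Decoder(dim_encoding=4, vocab_size=4).cuda()
    compiled = DecoderNew(dim_encoding=4, vocab_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share E.weight and b
    Z, targets = (t.cuda() for t in get_inputs())
    torch.testing.assert_close(compiled(Z, targets), eager(Z, targets))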
CenteredLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wn/cwnacrzxbn46f6bicovulqi3xha33kn6njpmjiuh67enpgvyqnvi.py # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # mean => mean # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) triton_per_fused_mean_sub_0 = async_compile.triton('triton_per_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_sub_0(in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) 
rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_per_fused_mean_sub_0.run(arg0_1, buf1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class CenteredLayer(nn.Module): def __init__(self, **kwargs): super(CenteredLayer, self).__init__(**kwargs) def forward(self, x): return x - x.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_sub_0(in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0)) tmp4 = 256.0 tmp5 = tmp3 / tmp4 tmp6 = tmp0 - tmp5 tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp6, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mean_sub_0[grid(1)](arg0_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class CenteredLayerNew(nn.Module): def __init__(self, **kwargs): super(CenteredLayerNew, self).__init__(**kwargs) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JamesWang007/Dive-into-DL-PyTorch
CenteredLayer
false
5366
[ "Apache-2.0" ]
1
267b54168322ab37da44e83008fba4f24b70fa9f
https://github.com/JamesWang007/Dive-into-DL-PyTorch/tree/267b54168322ab37da44e83008fba4f24b70fa9f
import torch from torch import nn class Model(nn.Module): def __init__(self, **kwargs): super().__init__(**kwargs) def forward(self, x): return x - x.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
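Because this row is inference-only, its call() can be smoke-tested directly (a sketch assuming a CUDA device and the call from the triton_code field above; note that call() empties the argument list it is given, so keep an outside reference to the input):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
buf, = call([x])  # returns a 1-tuple holding the centered tensor
torch.testing.assert_close(buf, x - x.mean())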
ConvBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sz/cszryrj5kmfztyrg6r3zq6t6rqkltip5llc24nn3ozvittnostjb.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] stream0 = get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf2, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.onnx import torch import torch.nn as nn import torch.utils.data class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(ConvBlock, self).__init__() self.Mconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding) self.MPrelu = nn.PReLU(num_parameters=out_channels) def forward(self, x): x = self.Mconv(x) x = self.MPrelu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx import torch import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf2, primals_1, primals_3, primals_4, buf1 class ConvBlockNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(ConvBlockNew, self).__init__() self.Mconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding) self.MPrelu = nn.PReLU(num_parameters=out_channels) def forward(self, input_0): primals_1 = self.Mconv.weight primals_2 = self.Mconv.bias primals_4 = self.MPrelu.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
IrohXu/Infant-Pose-pytorch
ConvBlock
false
5367
[ "MIT" ]
1
148c43fbfefe06ec2fffa7055049c3ff341154f8
https://github.com/IrohXu/Infant-Pose-pytorch/tree/148c43fbfefe06ec2fffa7055049c3ff341154f8
import torch import torch.onnx import torch import torch.nn as nn import torch.utils.data class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super().__init__() self.Mconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding) self.MPrelu = nn.PReLU(num_parameters=out_channels) def forward(self, x): x = self.Mconv(x) x = self.MPrelu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
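One more parity sketch for this row (not part of the record; assumes a CUDA device and the ConvBlock/ConvBlockNew classes plus get_inputs above). The fused Triton kernel adds the convolution bias and then applies PReLU's where(y > 0, y, a * y) in a single pass:

import torch

def check_convblock_record():
    torch.manual_seed(0)
    eager = ConvBlock(in_channels=4, out_channels=4).cuda()
    compiled = ConvBlockNew(in_channels=4, out_channels=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share conv and PReLU weights
    x, = (t.cuda() for t in get_inputs())
    torch.testing.assert_close(compiled(x), eager(x))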
StageBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sz/cszryrj5kmfztyrg6r3zq6t6rqkltip5llc24nn3ozvittnostjb.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] # Source node to ATen node mapping: # x => convolution # x_1 => gt, mul, where # Graph fragment: # %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {}) # %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused__prelu_kernel_convolution_0 = async_compile.triton('triton_poi_fused__prelu_kernel_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_4 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yc/cyctw7fzbq3yqgryx4i6waopqgcuvh2eufmovqoakos4fawmbbyw.py # Topologically Sorted Source Nodes: [x_cat_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_cat_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1, %where_2], 
1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 12 x0 = xindex % 16 x2 = (xindex // 192) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp11 & xmask, other=0.0) tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = tl.load(in_ptr3 + ((-8) + x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp17 * tmp14 tmp19 = tl.where(tmp16, tmp14, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp10, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + (x3), tmp23, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_15, (4, ), (1, )) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_18, (4, ), (1, )) assert_size_stride(primals_19, (4, ), (1, )) assert_size_stride(primals_20, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_21, (4, ), (1, )) assert_size_stride(primals_22, (4, ), (1, )) assert_size_stride(primals_23, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_24, (4, ), (1, )) assert_size_stride(primals_25, (4, ), (1, )) assert_size_stride(primals_26, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_27, (4, ), (1, )) assert_size_stride(primals_28, (4, ), (1, )) assert_size_stride(primals_29, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_30, (4, ), (1, )) assert_size_stride(primals_31, (4, ), (1, )) assert_size_stride(primals_32, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_33, (4, ), (1, )) assert_size_stride(primals_34, (4, ), (1, )) assert_size_stride(primals_35, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_36, (4, ), (1, )) assert_size_stride(primals_37, (4, ), (1, )) assert_size_stride(primals_38, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_39, (4, ), (1, )) assert_size_stride(primals_40, (4, ), (1, )) assert_size_stride(primals_41, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_42, (4, ), (1, )) assert_size_stride(primals_43, (4, ), (1, )) assert_size_stride(primals_44, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_45, (4, ), (1, )) assert_size_stride(primals_46, (4, ), (1, )) assert_size_stride(primals_47, (4, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_48, (4, ), (1, )) assert_size_stride(primals_49, (4, ), (1, )) assert_size_stride(primals_50, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_51, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._prelu_kernel] stream0 = get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0.run(buf1, primals_2, primals_4, buf2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 
4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf4, primals_6, primals_7, buf5, 256, grid=grid(256), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf7, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 buf8 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_cat_1], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf2, buf5, buf7, primals_10, buf8, 768, grid=grid(768), stream=stream0) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9; del buf9 # reuse buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf10, primals_12, primals_13, buf11, 256, grid=grid(256), stream=stream0) del primals_12 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1)) buf13 = buf12; del buf12 # reuse buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf13, primals_15, primals_16, buf14, 256, grid=grid(256), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf16, primals_18, 256, grid=grid(256), stream=stream0) del primals_18 buf17 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf11, buf14, buf16, primals_19, buf17, 768, grid=grid(768), stream=stream0) # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1)) buf19 = buf18; del buf18 # reuse buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_12, x_13], Original ATen: [aten.convolution, 
aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf19, primals_21, primals_22, buf20, 256, grid=grid(256), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf20, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1)) buf22 = buf21; del buf21 # reuse buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_14, x_15], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf22, primals_24, primals_25, buf23, 256, grid=grid(256), stream=stream0) del primals_24 # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 4, 4), (64, 16, 4, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf25, primals_27, 256, grid=grid(256), stream=stream0) del primals_27 buf26 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_cat_3], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf20, buf23, buf25, primals_28, buf26, 768, grid=grid(768), stream=stream0) # Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf26, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1)) buf28 = buf27; del buf27 # reuse buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_18, x_19], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf28, primals_30, primals_31, buf29, 256, grid=grid(256), stream=stream0) del primals_30 # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 4, 4), (64, 16, 4, 1)) buf31 = buf30; del buf30 # reuse buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf31, primals_33, primals_34, buf32, 256, grid=grid(256), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] buf33 = extern_kernels.convolution(buf32, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1)) buf34 = buf33; del buf33 # reuse # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf34, primals_36, 256, grid=grid(256), stream=stream0) del primals_36 buf35 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_cat_4], Original ATen: [aten.cat] 
triton_poi_fused_cat_2.run(buf29, buf32, buf34, primals_37, buf35, 768, grid=grid(768), stream=stream0) # Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 4, 4), (64, 16, 4, 1)) buf37 = buf36; del buf36 # reuse buf38 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_24, x_25], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf37, primals_39, primals_40, buf38, 256, grid=grid(256), stream=stream0) del primals_39 # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution] buf39 = extern_kernels.convolution(buf38, primals_41, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 4, 4, 4), (64, 16, 4, 1)) buf40 = buf39; del buf39 # reuse buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf40, primals_42, primals_43, buf41, 256, grid=grid(256), stream=stream0) del primals_42 # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 4, 4, 4), (64, 16, 4, 1)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf43, primals_45, 256, grid=grid(256), stream=stream0) del primals_45 buf44 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_cat_5], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf38, buf41, buf43, primals_46, buf44, 768, grid=grid(768), stream=stream0) # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution] buf45 = extern_kernels.convolution(buf44, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 4, 4, 4), (64, 16, 4, 1)) buf46 = buf45; del buf45 # reuse buf47 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_30, x_31], Original ATen: [aten.convolution, aten._prelu_kernel] triton_poi_fused__prelu_kernel_convolution_0.run(buf46, primals_48, primals_49, buf47, 256, grid=grid(256), stream=stream0) del primals_48 # Topologically Sorted Source Nodes: [stage_output], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 4, 4, 4), (64, 16, 4, 1)) buf49 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [stage_output], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf49, primals_51, 256, grid=grid(256), stream=stream0) del primals_51 return (buf49, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, primals_14, primals_16, primals_17, primals_19, primals_20, primals_22, primals_23, 
primals_25, primals_26, primals_28, primals_29, primals_31, primals_32, primals_34, primals_35, primals_37, primals_38, primals_40, primals_41, primals_43, primals_44, primals_46, primals_47, primals_49, primals_50, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46, buf47, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((4, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((4, 12, 1, 1), (12, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
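The generated graph above also fuses work into the channel concatenation: triton_poi_fused_cat_2 writes the 12-channel x_cat tensor directly, and for the last four channels it applies the still-pending PReLU of the third ConvBlock on the fly (in_ptr3 holds the per-channel slope) instead of materializing the activated tensor first. A minimal eager-mode sketch of that fusion (tensor names are illustrative, not from the generated code):

import torch

a = torch.randn(4, 4, 4, 4)   # branch 1, already activated (e.g. buf2)
b = torch.randn(4, 4, 4, 4)   # branch 2, already activated (e.g. buf5)
c = torch.randn(4, 4, 4, 4)   # branch 3, conv + bias only (e.g. buf7)
slope = torch.rand(4)         # PReLU weight of branch 3 (e.g. primals_10)

# PReLU is applied to the third branch inside the concat, as in the kernel
c_act = torch.where(c > 0, c, slope.view(1, -1, 1, 1) * c)
x_cat = torch.cat([a, b, c_act], dim=1)
assert x_cat.shape == (4, 12, 4, 4)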
import torch import torch.onnx import torch import torch.nn as nn import torch.utils.data class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(ConvBlock, self).__init__() self.Mconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding) self.MPrelu = nn.PReLU(num_parameters=out_channels) def forward(self, x): x = self.Mconv(x) x = self.MPrelu(x) return x class StageBlock(nn.Module): """ L1/L2 StageBlock Template """ def __init__(self, in_channels, inner_channels, innerout_channels, out_channels): super(StageBlock, self).__init__() self.Mconv1_0 = ConvBlock(in_channels, inner_channels) self.Mconv1_1 = ConvBlock(inner_channels, inner_channels) self.Mconv1_2 = ConvBlock(inner_channels, inner_channels) self.Mconv2_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv2_1 = ConvBlock(inner_channels, inner_channels) self.Mconv2_2 = ConvBlock(inner_channels, inner_channels) self.Mconv3_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv3_1 = ConvBlock(inner_channels, inner_channels) self.Mconv3_2 = ConvBlock(inner_channels, inner_channels) self.Mconv4_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv4_1 = ConvBlock(inner_channels, inner_channels) self.Mconv4_2 = ConvBlock(inner_channels, inner_channels) self.Mconv5_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv5_1 = ConvBlock(inner_channels, inner_channels) self.Mconv5_2 = ConvBlock(inner_channels, inner_channels) self.Mconv6 = ConvBlock(inner_channels * 3, innerout_channels, kernel_size=1, stride=1, padding=0) self.Mconv7 = nn.Conv2d(in_channels=innerout_channels, out_channels =out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): out1_1 = self.Mconv1_0(x) out2_1 = self.Mconv1_1(out1_1) out3_1 = self.Mconv1_2(out2_1) x_cat_1 = torch.cat([out1_1, out2_1, out3_1], 1) out1_2 = self.Mconv2_0(x_cat_1) out2_2 = self.Mconv2_1(out1_2) out3_2 = self.Mconv2_2(out2_2) x_cat_2 = torch.cat([out1_2, out2_2, out3_2], 1) out1_3 = self.Mconv3_0(x_cat_2) out2_3 = self.Mconv3_1(out1_3) out3_3 = self.Mconv3_2(out2_3) x_cat_3 = torch.cat([out1_3, out2_3, out3_3], 1) out1_4 = self.Mconv4_0(x_cat_3) out2_4 = self.Mconv4_1(out1_4) out3_4 = self.Mconv4_2(out2_4) x_cat_4 = torch.cat([out1_4, out2_4, out3_4], 1) out1_5 = self.Mconv5_0(x_cat_4) out2_5 = self.Mconv5_1(out1_5) out3_5 = self.Mconv5_2(out2_5) x_cat_5 = torch.cat([out1_5, out2_5, out3_5], 1) out_6 = self.Mconv6(x_cat_5) stage_output = self.Mconv7(out_6) return stage_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'inner_channels': 4, 'innerout_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.onnx import torch import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp6 = tmp5 * tmp2 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 12 x0 = xindex % 16 x2 = xindex // 192 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp11 & xmask, other=0.0) tmp15 = 0.0 tmp16 = tmp14 > tmp15 tmp17 = tl.load(in_ptr3 + (-8 + x1), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tmp17 * tmp14 tmp19 = tl.where(tmp16, tmp14, tmp18) tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype) tmp21 = tl.where(tmp11, tmp19, tmp20) tmp22 = tl.where(tmp9, tmp10, tmp21) tmp23 = tl.where(tmp4, tmp5, tmp22) tl.store(out_ptr0 + x3, tmp23, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 
1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4,), (1,)) assert_size_stride(primals_23, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_24, (4,), (1,)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4,), (1,)) assert_size_stride(primals_29, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_30, (4,), (1,)) assert_size_stride(primals_31, (4,), (1,)) assert_size_stride(primals_32, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_33, (4,), (1,)) assert_size_stride(primals_34, (4,), (1,)) assert_size_stride(primals_35, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_36, (4,), (1,)) assert_size_stride(primals_37, (4,), (1,)) assert_size_stride(primals_38, (4, 12, 3, 3), (108, 9, 3, 1)) assert_size_stride(primals_39, (4,), (1,)) assert_size_stride(primals_40, (4,), (1,)) assert_size_stride(primals_41, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_42, (4,), (1,)) assert_size_stride(primals_43, (4,), (1,)) assert_size_stride(primals_44, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_45, (4,), (1,)) assert_size_stride(primals_46, (4,), (1,)) assert_size_stride(primals_47, (4, 12, 1, 1), (12, 1, 1, 1)) assert_size_stride(primals_48, (4,), (1,)) assert_size_stride(primals_49, (4,), (1,)) assert_size_stride(primals_50, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_51, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf4, primals_6, primals_7, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_1[grid(256)](buf7, primals_9, 256, XBLOCK=256, 
num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32 ) triton_poi_fused_cat_2[grid(768)](buf2, buf5, buf7, primals_10, buf8, 768, XBLOCK=128, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4, 4), (64, 16, 4, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf10, primals_12, primals_13, buf11, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_12 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1)) buf13 = buf12 del buf12 buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf13, primals_15, primals_16, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf15 = extern_kernels.convolution(buf14, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_1[grid(256)](buf16, primals_18, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_18 buf17 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_2[grid(768)](buf11, buf14, buf16, primals_19, buf17, 768, XBLOCK=128, num_warps=4, num_stages=1) buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1)) buf19 = buf18 del buf18 buf20 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf19, primals_21, primals_22, buf20, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_21 buf21 = extern_kernels.convolution(buf20, primals_23, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 4, 4, 4), (64, 16, 4, 1)) buf22 = buf21 del buf21 buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf22, primals_24, primals_25, buf23, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_24 buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 4, 4), (64, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_1[grid(256)](buf25, primals_27, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_27 buf26 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(768)](buf20, buf23, buf25, primals_28, buf26, 768, XBLOCK=128, num_warps=4, num_stages=1) buf27 = extern_kernels.convolution(buf26, primals_29, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 4, 4, 4), (64, 16, 4, 1)) buf28 = buf27 del buf27 buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf28, primals_30, primals_31, buf29, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_30 buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 4, 4), (64, 16, 4, 1)) buf31 = buf30 del buf30 buf32 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf31, primals_33, primals_34, buf32, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_33 buf33 = extern_kernels.convolution(buf32, primals_35, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 4, 4, 4), (64, 16, 4, 1)) buf34 = buf33 del buf33 triton_poi_fused_convolution_1[grid(256)](buf34, primals_36, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_36 buf35 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. float32) triton_poi_fused_cat_2[grid(768)](buf29, buf32, buf34, primals_37, buf35, 768, XBLOCK=128, num_warps=4, num_stages=1) buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 4, 4, 4), (64, 16, 4, 1)) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf37, primals_39, primals_40, buf38, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_39 buf39 = extern_kernels.convolution(buf38, primals_41, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 4, 4, 4), (64, 16, 4, 1)) buf40 = buf39 del buf39 buf41 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf40, primals_42, primals_43, buf41, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_42 buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 4, 4, 4), (64, 16, 4, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_1[grid(256)](buf43, primals_45, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_45 buf44 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch. 
float32) triton_poi_fused_cat_2[grid(768)](buf38, buf41, buf43, primals_46, buf44, 768, XBLOCK=128, num_warps=4, num_stages=1) buf45 = extern_kernels.convolution(buf44, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 4, 4, 4), (64, 16, 4, 1)) buf46 = buf45 del buf45 buf47 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf46, primals_48, primals_49, buf47, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_48 buf48 = extern_kernels.convolution(buf47, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 4, 4, 4), (64, 16, 4, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_1[grid(256)](buf49, primals_51, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_51 return (buf49, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, primals_14, primals_16, primals_17, primals_19, primals_20, primals_22, primals_23, primals_25, primals_26, primals_28, primals_29, primals_31, primals_32, primals_34, primals_35, primals_37, primals_38, primals_40, primals_41, primals_43, primals_44, primals_46, primals_47, primals_49, primals_50, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf13, buf14, buf16, buf17, buf19, buf20, buf22, buf23, buf25, buf26, buf28, buf29, buf31, buf32, buf34, buf35, buf37, buf38, buf40, buf41, buf43, buf44, buf46, buf47) class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(ConvBlock, self).__init__() self.Mconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding) self.MPrelu = nn.PReLU(num_parameters=out_channels) def forward(self, x): x = self.Mconv(x) x = self.MPrelu(x) return x class StageBlockNew(nn.Module): """ L1/L2 StageBlock Template """ def __init__(self, in_channels, inner_channels, innerout_channels, out_channels): super(StageBlockNew, self).__init__() self.Mconv1_0 = ConvBlock(in_channels, inner_channels) self.Mconv1_1 = ConvBlock(inner_channels, inner_channels) self.Mconv1_2 = ConvBlock(inner_channels, inner_channels) self.Mconv2_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv2_1 = ConvBlock(inner_channels, inner_channels) self.Mconv2_2 = ConvBlock(inner_channels, inner_channels) self.Mconv3_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv3_1 = ConvBlock(inner_channels, inner_channels) self.Mconv3_2 = ConvBlock(inner_channels, inner_channels) self.Mconv4_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv4_1 = ConvBlock(inner_channels, inner_channels) self.Mconv4_2 = ConvBlock(inner_channels, inner_channels) self.Mconv5_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv5_1 = ConvBlock(inner_channels, inner_channels) self.Mconv5_2 = ConvBlock(inner_channels, inner_channels) self.Mconv6 = ConvBlock(inner_channels * 3, innerout_channels, kernel_size=1, stride=1, padding=0) self.Mconv7 = nn.Conv2d(in_channels=innerout_channels, out_channels =out_channels, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_1 = self.Mconv1_0.Mconv.weight primals_2 = self.Mconv1_0.Mconv.bias primals_4 = self.Mconv1_0.MPrelu.weight primals_5 = self.Mconv1_1.Mconv.weight primals_6 = self.Mconv1_1.Mconv.bias primals_7 = self.Mconv1_1.MPrelu.weight primals_8 = 
self.Mconv1_2.Mconv.weight primals_9 = self.Mconv1_2.Mconv.bias primals_10 = self.Mconv1_2.MPrelu.weight primals_11 = self.Mconv2_0.Mconv.weight primals_12 = self.Mconv2_0.Mconv.bias primals_13 = self.Mconv2_0.MPrelu.weight primals_14 = self.Mconv2_1.Mconv.weight primals_15 = self.Mconv2_1.Mconv.bias primals_16 = self.Mconv2_1.MPrelu.weight primals_17 = self.Mconv2_2.Mconv.weight primals_18 = self.Mconv2_2.Mconv.bias primals_19 = self.Mconv2_2.MPrelu.weight primals_20 = self.Mconv3_0.Mconv.weight primals_21 = self.Mconv3_0.Mconv.bias primals_22 = self.Mconv3_0.MPrelu.weight primals_23 = self.Mconv3_1.Mconv.weight primals_24 = self.Mconv3_1.Mconv.bias primals_25 = self.Mconv3_1.MPrelu.weight primals_26 = self.Mconv3_2.Mconv.weight primals_27 = self.Mconv3_2.Mconv.bias primals_28 = self.Mconv3_2.MPrelu.weight primals_29 = self.Mconv4_0.Mconv.weight primals_30 = self.Mconv4_0.Mconv.bias primals_31 = self.Mconv4_0.MPrelu.weight primals_32 = self.Mconv4_1.Mconv.weight primals_33 = self.Mconv4_1.Mconv.bias primals_34 = self.Mconv4_1.MPrelu.weight primals_35 = self.Mconv4_2.Mconv.weight primals_36 = self.Mconv4_2.Mconv.bias primals_37 = self.Mconv4_2.MPrelu.weight primals_38 = self.Mconv5_0.Mconv.weight primals_39 = self.Mconv5_0.Mconv.bias primals_40 = self.Mconv5_0.MPrelu.weight primals_41 = self.Mconv5_1.Mconv.weight primals_42 = self.Mconv5_1.Mconv.bias primals_43 = self.Mconv5_1.MPrelu.weight primals_44 = self.Mconv5_2.Mconv.weight primals_45 = self.Mconv5_2.Mconv.bias primals_46 = self.Mconv5_2.MPrelu.weight primals_47 = self.Mconv6.Mconv.weight primals_48 = self.Mconv6.Mconv.bias primals_49 = self.Mconv6.MPrelu.weight primals_50 = self.Mconv7.weight primals_51 = self.Mconv7.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51]) return output[0]
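The other recurring fusion in the listing above, triton_poi_fused__prelu_kernel_convolution_0, folds the convolution's per-channel bias add and the PReLU activation into one elementwise pass; the convolution itself still runs through extern_kernels.convolution with bias=None. A minimal eager-mode sketch of that elementwise math (tensor names are illustrative, not from the generated code):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
conv_out = torch.randn(4, 4, 4, 4)   # convolution output before bias (e.g. buf1)
bias = torch.randn(4)                # conv bias (e.g. primals_2)
slope = torch.rand(4)                # per-channel PReLU weight (e.g. primals_4)

y = conv_out + bias.view(1, -1, 1, 1)                       # tmp2 = tmp0 + tmp1
fused = torch.where(y > 0, y, slope.view(1, -1, 1, 1) * y)  # tl.where(tmp4, tmp2, tmp6)

# the fused expression matches PReLU applied after the bias add
assert torch.allclose(fused, F.prelu(y, slope))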
IrohXu/Infant-Pose-pytorch
StageBlock
false
5368
[ "MIT" ]
1
148c43fbfefe06ec2fffa7055049c3ff341154f8
https://github.com/IrohXu/Infant-Pose-pytorch/tree/148c43fbfefe06ec2fffa7055049c3ff341154f8
import torch import torch.onnx import torch import torch.nn as nn import torch.utils.data class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super().__init__() self.Mconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, stride=stride, padding= padding) self.MPrelu = nn.PReLU(num_parameters=out_channels) def forward(self, x): x = self.Mconv(x) x = self.MPrelu(x) return x class Model(nn.Module): """ L1/L2 StageBlock Template """ def __init__(self, in_channels, inner_channels, innerout_channels, out_channels): super().__init__() self.Mconv1_0 = ConvBlock(in_channels, inner_channels) self.Mconv1_1 = ConvBlock(inner_channels, inner_channels) self.Mconv1_2 = ConvBlock(inner_channels, inner_channels) self.Mconv2_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv2_1 = ConvBlock(inner_channels, inner_channels) self.Mconv2_2 = ConvBlock(inner_channels, inner_channels) self.Mconv3_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv3_1 = ConvBlock(inner_channels, inner_channels) self.Mconv3_2 = ConvBlock(inner_channels, inner_channels) self.Mconv4_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv4_1 = ConvBlock(inner_channels, inner_channels) self.Mconv4_2 = ConvBlock(inner_channels, inner_channels) self.Mconv5_0 = ConvBlock(inner_channels * 3, inner_channels) self.Mconv5_1 = ConvBlock(inner_channels, inner_channels) self.Mconv5_2 = ConvBlock(inner_channels, inner_channels) self.Mconv6 = ConvBlock(inner_channels * 3, innerout_channels, kernel_size=1, stride=1, padding=0) self.Mconv7 = nn.Conv2d(in_channels=innerout_channels, out_channels =out_channels, kernel_size=1, stride=1, padding=0) def forward(self, x): out1_1 = self.Mconv1_0(x) out2_1 = self.Mconv1_1(out1_1) out3_1 = self.Mconv1_2(out2_1) x_cat_1 = torch.cat([out1_1, out2_1, out3_1], 1) out1_2 = self.Mconv2_0(x_cat_1) out2_2 = self.Mconv2_1(out1_2) out3_2 = self.Mconv2_2(out2_2) x_cat_2 = torch.cat([out1_2, out2_2, out3_2], 1) out1_3 = self.Mconv3_0(x_cat_2) out2_3 = self.Mconv3_1(out1_3) out3_3 = self.Mconv3_2(out2_3) x_cat_3 = torch.cat([out1_3, out2_3, out3_3], 1) out1_4 = self.Mconv4_0(x_cat_3) out2_4 = self.Mconv4_1(out1_4) out3_4 = self.Mconv4_2(out2_4) x_cat_4 = torch.cat([out1_4, out2_4, out3_4], 1) out1_5 = self.Mconv5_0(x_cat_4) out2_5 = self.Mconv5_1(out1_5) out3_5 = self.Mconv5_2(out2_5) x_cat_5 = torch.cat([out1_5, out2_5, out3_5], 1) out_6 = self.Mconv6(x_cat_5) stage_output = self.Mconv7(out_6) return stage_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'inner_channels': 4, 'innerout_channels': 4, 'out_channels': 4}]
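A short usage sketch for the reference module above, using the shapes that get_inputs() and get_init_inputs() describe; every 3x3 convolution uses stride 1 and padding 1 and the final convolutions are 1x1, so the 4x4 spatial size is preserved throughout:

import torch

model = Model(in_channels=4, inner_channels=4, innerout_channels=4,
              out_channels=4)
x = torch.rand(4, 4, 4, 4)
stage_output = model(x)
print(stage_output.shape)  # torch.Size([4, 4, 4, 4])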
DistillKL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mc/cmc44gqwlbgitm3uqkuiwz6fe3jirwculg7zmyndeuqzyyqzyok7.py # Topologically Sorted Source Nodes: [p_t], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_t => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xg/cxg6geasclvgycjnyaybokxud5rdp2fe6eropfaplher4ysvlw4g.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), 
xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bn/cbnq3b4f3wkxjyazqbgxcpo4q6wewzigsbhu3mgms7fjutjswpex.py # Topologically Sorted Source Nodes: [p_t, kl_div, p_s, mul, loss], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # kl_div => eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # loss => div_3 # mul => mul_2 # p_s => exp, log, sub_1, sum_1 # p_t => div_2, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 16), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 4), kwargs = {}) triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2 = 
async_compile.triton('triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (r3), None) tmp18 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 16.0 tmp37 = tmp35 * tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, 
tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_t], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [p_t, kl_div, p_s, mul, loss], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class DistillKL(nn.Module): """KL divergence for distillation""" def __init__(self, T): super(DistillKL, self).__init__() self.T = T def forward(self, y_s, y_t): p_s = F.log_softmax(y_s / self.T, dim=1) p_t = F.softmax(y_t / self.T, dim=1) loss = F.kl_div(p_s, p_t, size_average=False ) * self.T ** 2 / y_s.shape[0] return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'T': 4}]
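A sanity-check sketch (not part of the repo): with size_average=False the call reduces by summation, so the loss above is the summed KL divergence KL(p_t || p_s) between temperature-softened distributions, scaled by T ** 2 and divided by the batch size. The manual computation below should agree:

import torch
import torch.nn.functional as F

T, N = 4.0, 4
y_s = torch.randn(N, 4, 4, 4)
y_t = torch.randn(N, 4, 4, 4)

p_s = F.log_softmax(y_s / T, dim=1)
p_t = F.softmax(y_t / T, dim=1)
loss = F.kl_div(p_s, p_t, reduction='sum') * T ** 2 / N  # 'sum' == size_average=False

# sum over target * (log target - log input-probs), the definition of kl_div
manual = (p_t * (p_t.clamp_min(1e-30).log() - p_s)).sum() * T ** 2 / N
assert torch.allclose(loss, manual, atol=1e-5)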
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 
tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 16.0 tmp37 = tmp35 * tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class DistillKLNew(nn.Module): """KL divergence for distillation""" def __init__(self, T): super(DistillKLNew, self).__init__() self.T = T def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
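In the persistent-reduction kernel above, the final scaling tmp37 = tmp35 * 16.0 followed by tmp39 = tmp37 * 0.25 is the specialization of T ** 2 / y_s.shape[0] for T = 4 and batch size 4; Inductor constant-folds the hyperparameter into the kernel (the 0.25 in the two pointwise kernels is likewise 1 / T). A quick check of that arithmetic:

T, N = 4, 4
assert T ** 2 / N == 16.0 * 0.25  # both equal 4.0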
JJuOn/Few-shot_Class_Incremental_Learning
DistillKL
false
5369
[ "MIT" ]
1
a2178051a6fefcd73b60f5e4236116bf828a801c
https://github.com/JJuOn/Few-shot_Class_Incremental_Learning/tree/a2178051a6fefcd73b60f5e4236116bf828a801c
import torch import torch.nn.functional as F from torch import nn class Model(nn.Module): """KL divergence for distillation""" def __init__(self, T): super().__init__() self.T = T def forward(self, y_s, y_t): p_s = F.log_softmax(y_s / self.T, dim=1) p_t = F.softmax(y_t / self.T, dim=1) loss = F.kl_div(p_s, p_t, size_average=False ) * self.T ** 2 / y_s.shape[0] return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
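Since size_average is deprecated in current PyTorch, a hedged modernization sketch of the forward pass above uses reduction='sum' explicitly (the class name DistillKLSum is illustrative, not from the repo):

import torch.nn.functional as F
from torch import nn

class DistillKLSum(nn.Module):
    """KL distillation loss with an explicit sum reduction."""

    def __init__(self, T):
        super().__init__()
        self.T = T

    def forward(self, y_s, y_t):
        p_s = F.log_softmax(y_s / self.T, dim=1)
        p_t = F.softmax(y_t / self.T, dim=1)
        # reduction='sum' is the modern spelling of size_average=False
        return F.kl_div(p_s, p_t, reduction='sum') * self.T ** 2 / y_s.shape[0]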
FastRCNNPredictor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf3, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [score], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [bbox_delta], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_9 return (buf4, buf5, primals_1, buf1, buf3, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class FastRCNNPredictor(nn.Module): def __init__(self, in_channels, mid_channels, num_classes): super().__init__() self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 = nn.Linear(mid_channels, mid_channels) self.cls_score = nn.Linear(mid_channels, num_classes) self.bbox_pred = nn.Linear(mid_channels, num_classes * 4) def forward(self, x): x = x.flatten(start_dim=1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) score = self.cls_score(x) bbox_delta = self.bbox_pred(x) return score, bbox_delta def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'mid_channels': 4, 'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (16, 4), (4, 1)) assert_size_stride(primals_9, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_7 buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_9 return buf4, buf5, primals_1, buf1, buf3, primals_8, primals_6, primals_4 class FastRCNNPredictorNew(nn.Module): def __init__(self, in_channels, mid_channels, num_classes): super().__init__() self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 = nn.Linear(mid_channels, mid_channels) self.cls_score = nn.Linear(mid_channels, num_classes) self.bbox_pred = nn.Linear(mid_channels, num_classes * 4) def forward(self, input_0): primals_1 = self.fc1.weight primals_3 = self.fc1.bias primals_2 = self.fc2.weight primals_5 = self.fc2.bias primals_4 = self.cls_score.weight primals_7 = self.cls_score.bias primals_8 = self.bbox_pred.weight primals_9 = self.bbox_pred.bias primals_6 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
Jaramies/PyTorch-Simple-MaskRCNN
FastRCNNPredictor
false
5,370
[ "MIT" ]
1
21e6c6983b34061800280573ebe705ae17212972
https://github.com/Jaramies/PyTorch-Simple-MaskRCNN/tree/21e6c6983b34061800280573ebe705ae17212972
import torch import torch.nn.functional as F from torch import nn class Model(nn.Module): def __init__(self, in_channels, mid_channels, num_classes): super().__init__() self.fc1 = nn.Linear(in_channels, mid_channels) self.fc2 = nn.Linear(mid_channels, mid_channels) self.cls_score = nn.Linear(mid_channels, num_classes) self.bbox_pred = nn.Linear(mid_channels, num_classes * 4) def forward(self, x): x = x.flatten(start_dim=1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) score = self.cls_score(x) bbox_delta = self.bbox_pred(x) return score, bbox_delta def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4, 4, 4]
RPNHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.convolution] # Source node to ATen node mapping: # logits => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ce/cce4eqfnjrkjabnwsgsb2usrigptngwve65kohgxy7tghysxb3yw.py # Topologically Sorted Source Nodes: [bbox_reg], Original ATen: [aten.convolution] # Source node to ATen node mapping: # bbox_reg => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [bbox_reg], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 4, 4), (256, 16, 
4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [bbox_reg], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 1024, grid=grid(1024), stream=stream0) del primals_7 return (buf3, buf5, primals_1, primals_3, primals_4, primals_6, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F from torch import nn class RPNHead(nn.Module): def __init__(self, in_channels, num_anchors): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.cls_logits = nn.Conv2d(in_channels, num_anchors, 1) self.bbox_pred = nn.Conv2d(in_channels, 4 * num_anchors, 1) for l in self.children(): nn.init.normal_(l.weight, std=0.01) nn.init.constant_(l.bias, 0) def forward(self, x): x = F.relu(self.conv(x)) logits = self.cls_logits(x) bbox_reg = self.bbox_pred(x) return logits, bbox_reg def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'num_anchors': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (16, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 4, 4), (256, 16, 4, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(1024)](buf5, primals_7, 1024, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf3, buf5, primals_1, primals_3, primals_4, primals_6, buf1 class RPNHeadNew(nn.Module): def __init__(self, in_channels, num_anchors): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) 
self.cls_logits = nn.Conv2d(in_channels, num_anchors, 1) self.bbox_pred = nn.Conv2d(in_channels, 4 * num_anchors, 1) for l in self.children(): nn.init.normal_(l.weight, std=0.01) nn.init.constant_(l.bias, 0) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.cls_logits.weight primals_5 = self.cls_logits.bias primals_6 = self.bbox_pred.weight primals_7 = self.bbox_pred.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
Jaramies/PyTorch-Simple-MaskRCNN
RPNHead
false
5,371
[ "MIT" ]
1
21e6c6983b34061800280573ebe705ae17212972
https://github.com/Jaramies/PyTorch-Simple-MaskRCNN/tree/21e6c6983b34061800280573ebe705ae17212972
import torch import torch.nn.functional as F from torch import nn class Model(nn.Module): def __init__(self, in_channels, num_anchors): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1) self.cls_logits = nn.Conv2d(in_channels, num_anchors, 1) self.bbox_pred = nn.Conv2d(in_channels, 4 * num_anchors, 1) for l in self.children(): nn.init.normal_(l.weight, std=0.01) nn.init.constant_(l.bias, 0) def forward(self, x): x = F.relu(self.conv(x)) logits = self.cls_logits(x) bbox_reg = self.bbox_pred(x) return logits, bbox_reg def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
GlobalAvgPool2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class GlobalAvgPool2d(nn.Module): def __init__(self): super(GlobalAvgPool2d, self).__init__() def forward(self, x): return F.avg_pool2d(x, kernel_size=x.size()[2:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class GlobalAvgPool2dNew(nn.Module): def __init__(self): super(GlobalAvgPool2dNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JamesWang007/Dive-into-DL-PyTorch
GlobalAvgPool2d
false
5,372
[ "Apache-2.0" ]
1
267b54168322ab37da44e83008fba4f24b70fa9f
https://github.com/JamesWang007/Dive-into-DL-PyTorch/tree/267b54168322ab37da44e83008fba4f24b70fa9f
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return F.avg_pool2d(x, kernel_size=x.size()[2:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
ShapePriorLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yv/cyv2al5wgxms4uplpqpbjmuuwmzsw6m3c4otscmyh3px3rzh3whi.py # Topologically Sorted Source Nodes: [pow_1, shape_prior_loss, shape_prior_loss_1], Original ATen: [aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # pow_1 => pow_1 # shape_prior_loss => mul # shape_prior_loss_1 => mean # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul,), kwargs = {}) triton_per_fused_mean_mul_pow_0 = async_compile.triton('triton_per_fused_mean_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [pow_1, shape_prior_loss, shape_prior_loss_1], Original ATen: [aten.pow, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_pow_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda.comm class ShapePriorLoss(nn.Module): """Prior loss for body shape parameters. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, betas, loss_weight_override=None, reduction_override=None ): """Forward function of loss. Args: betas (torch.Tensor): The body shape parameters loss_weight_override (float, optional): The weight of loss used to override the original weight of loss reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_weight = (loss_weight_override if loss_weight_override is not None else self.loss_weight) shape_prior_loss = loss_weight * betas ** 2 if reduction == 'mean': shape_prior_loss = shape_prior_loss.mean() elif reduction == 'sum': shape_prior_loss = shape_prior_loss.sum() return shape_prior_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.cuda.comm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_pow_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class ShapePriorLossNew(nn.Module): """Prior loss for body shape parameters. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JasonBoy1/mmhuman3d
ShapePriorLoss
false
5,373
[ "Apache-2.0" ]
1
79b2665191115f3ed905e6afdf09990a8d484362
https://github.com/JasonBoy1/mmhuman3d/tree/79b2665191115f3ed905e6afdf09990a8d484362
import torch import torch.nn as nn import torch.cuda.comm class Model(nn.Module): """Prior loss for body shape parameters. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, betas, loss_weight_override=None, reduction_override=None ): """Forward function of loss. Args: betas (torch.Tensor): The body shape parameters loss_weight_override (float, optional): The weight of loss used to override the original weight of loss reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_weight = (loss_weight_override if loss_weight_override is not None else self.loss_weight) shape_prior_loss = loss_weight * betas ** 2 if reduction == 'mean': shape_prior_loss = shape_prior_loss.mean() elif reduction == 'sum': shape_prior_loss = shape_prior_loss.sum() return shape_prior_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
stage_block
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7b/c7bwvkzrfyqe7on7r6rupptsqxo3x6vxvpuiow36csr3chlibccz.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u7/cu7omvlo3u5eumdt3syjzrt4dpgkb7ra5377otoflgd62iyasjpk.py # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_6 => convolution_3 # x_7 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [4, 4], [4, 4], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jd/cjdedjwqwok3veyekms6vsvzizqpg3ta7allmwzvhslkqb2q4e2i.py # Topologically Sorted Source Nodes: [x_cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1, %relu_2, %relu_3, %relu_4], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 16) % 256 x0 = xindex % 16 x2 = (xindex // 4096) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (1024*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 128, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*((-64) + x1)) + (1024*x2)), tmp9, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 192, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (16*((-128) + x1)) + (1024*x2)), tmp14, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 224, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + (16*((-192) + x1)) + (512*x2)), tmp19, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 256, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tl.load(in_ptr4 + (x0 + (16*((-224) + x1)) + (512*x2)), tmp21, other=0.0) tmp25 = tl.load(in_ptr5 + ((-224) + x1), tmp21, eviction_policy='evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp21, tmp28, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + (x3), tmp34, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/uk/cukn5clwce25cwtgsk34lhhie5xjhd4s7kcgvlclefsgtceneixz.py # Topologically Sorted Source Nodes: [x_out, x_out_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_out => convolution_5 # x_out_1 => relu_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], 
False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zw/czwp3zxikbzpn27bftm4h4xfeumnwrsrmzo3g36y5stomelytcev.py # Topologically Sorted Source Nodes: [paf], Original ATen: [aten.convolution] # Source node to ATen node mapping: # paf => convolution_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 896 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 14 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2x/c2x6cucvhtkvtkj3cigyhort5u3qrtm36pjcjl72tovxzw7zlm4y.py # Topologically Sorted Source Nodes: [heatmap], Original ATen: [aten.convolution] # Source node to ATen node mapping: # heatmap => convolution_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 9 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/vb/cvbt7wqpxwgpm2qp2bqlusncosmvfnisulz23hbgvni4dcxyzgmo.py # 
Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_8 => convolution_4 # x_9 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [8, 8], [8, 8], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 32 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) 
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32, ), (1, )) assert_size_stride(primals_12, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (14, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_15, (14, ), (1, )) assert_size_stride(primals_16, (9, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_17, (9, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 4, 4), (512, 16, 4, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf7, primals_9, 2048, grid=grid(2048), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 4, 4), (512, 16, 4, 1)) buf9 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_cat], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf1, buf3, buf5, buf7, buf8, primals_11, buf9, 16384, grid=grid(16384), stream=stream0) # Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 4, 4), (2048, 16, 4, 1)) buf11 = buf10; del buf10 # reuse # 
Topologically Sorted Source Nodes: [x_out, x_out_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf11, primals_13, 8192, grid=grid(8192), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [paf], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 14, 4, 4), (224, 16, 4, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [paf], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf13, primals_15, 896, grid=grid(896), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [heatmap], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 9, 4, 4), (144, 16, 4, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [heatmap], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(buf15, primals_17, 576, grid=grid(576), stream=stream0) del primals_17 buf16 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_6.run(buf8, primals_11, buf16, 2048, grid=grid(2048), stream=stream0) del buf8 del primals_11 return (buf13, buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf5, buf7, buf9, buf11, buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((14, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((14, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((9, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((9, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, 
primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
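The boolean buffer buf16 written by triton_poi_fused_convolution_relu_threshold_backward_6 above is the ReLU mask that Inductor saves for the backward pass. A minimal eager-mode sketch of what that mask encodes (an illustration of the idea, not the generated kernel itself):

import torch

# For y = relu(x), the saved mask marks positions where y <= 0; the
# incoming gradient is zeroed there during the backward pass.
x = torch.randn(4, 32, 4, 4, requires_grad=True)
y = torch.relu(x)
mask = y <= 0                                  # analogous to buf16 for x_9 = relu(x_8)
grad_out = torch.randn_like(y)
grad_in = torch.where(mask, torch.zeros_like(grad_out), grad_out)
y.backward(grad_out)
assert torch.equal(grad_in, x.grad)            # matches autograd exactly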
import torch import torch.onnx import torch import torch.nn as nn import torch.utils.data class dilation_layer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, padding= 'same_padding', dilation=1): super(dilation_layer, self).__init__() if padding == 'same_padding': padding = int((kernel_size - 1) / 2 * dilation) self.Dconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, padding=padding, dilation=dilation) self.Drelu = nn.ReLU(inplace=True) def forward(self, x): x = self.Dconv(x) x = self.Drelu(x) return x class stage_block(nn.Module): """This class makes sure the paf and heatmap branch out in every stage""" def __init__(self, in_channels, out_channels): super(stage_block, self).__init__() self.Dconv_1 = dilation_layer(in_channels, out_channels=64) self.Dconv_2 = dilation_layer(in_channels=64, out_channels=64) self.Dconv_3 = dilation_layer(in_channels=64, out_channels=64, dilation=2) self.Dconv_4 = dilation_layer(in_channels=64, out_channels=32, dilation=4) self.Dconv_5 = dilation_layer(in_channels=32, out_channels=32, dilation=8) self.Mconv_6 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=1, padding=0) self.Mrelu_6 = nn.ReLU(inplace=True) self.paf = nn.Conv2d(in_channels=128, out_channels=14, kernel_size= 1, padding=0) self.heatmap = nn.Conv2d(in_channels=128, out_channels=9, kernel_size=1, padding=0) def forward(self, x): x_1 = self.Dconv_1(x) x_2 = self.Dconv_2(x_1) x_3 = self.Dconv_3(x_2) x_4 = self.Dconv_4(x_3) x_5 = self.Dconv_5(x_4) x_cat = torch.cat([x_1, x_2, x_3, x_4, x_5], 1) x_out = self.Mconv_6(x_cat) x_out = self.Mrelu_6(x_out) paf = self.paf(x_out) heatmap = self.heatmap(x_out) return paf, heatmap def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
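A minimal usage sketch for the eager module above (it assumes only the definitions in this cell; note that out_channels is accepted by stage_block.__init__ but never used, since the head widths are hard-coded at 14 and 9):

import torch

block = stage_block(in_channels=4, out_channels=4)
paf, heatmap = block(torch.rand(4, 4, 4, 4))
print(paf.shape)      # torch.Size([4, 14, 4, 4]) -- 14 PAF channels
print(heatmap.shape)  # torch.Size([4, 9, 4, 4])  -- 9 heatmap channels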
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.onnx import torch import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 256 x0 = xindex % 16 x2 = xindex // 4096 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 1024 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 128, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-64 + x1) + 1024 * x2), tmp9, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 192, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-128 + x1) + 1024 * x2), tmp14, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 224, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + 16 * (-192 + x1) + 512 * x2), tmp19, other=0.0) tmp21 = tmp0 >= tmp17 tl.full([1], 256, tl.int64) tmp24 = tl.load(in_ptr4 + (x0 + 16 * (-224 + x1) + 512 * x2), tmp21, other=0.0) tmp25 = tl.load(in_ptr5 + (-224 + x1), tmp21, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp24 + tmp25 tmp27 = tl.full([1], 0, tl.int32) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp21, tmp28, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tl.store(out_ptr0 + x3, tmp34, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, 
tmp4, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 896 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 14 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 9 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (32, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (14, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_15, (14,), (1,)) assert_size_stride(primals_16, (9, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_17, (9,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_2, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(4096)](buf3, primals_5, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1)) 
buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(4096)](buf5, primals_7, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 4, 4), (512, 16, 4, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_1[grid(2048)](buf7, primals_9, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 4, 4), (512, 16, 4, 1)) buf9 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch. float32) triton_poi_fused_cat_2[grid(16384)](buf1, buf3, buf5, buf7, buf8, primals_11, buf9, 16384, XBLOCK=256, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 4, 4), (2048, 16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_3[grid(8192)](buf11, primals_13, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 14, 4, 4), (224, 16, 4, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_4[grid(896)](buf13, primals_15, 896, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(buf11, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 9, 4, 4), (144, 16, 4, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_5[grid(576)](buf15, primals_17, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_17 buf16 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(2048)](buf8 , primals_11, buf16, 2048, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_11 return (buf13, buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, buf1, buf3, buf5, buf7, buf9, buf11, buf16) class dilation_layer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, padding= 'same_padding', dilation=1): super(dilation_layer, self).__init__() if padding == 'same_padding': padding = int((kernel_size - 1) / 2 * dilation) self.Dconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, padding=padding, dilation=dilation) self.Drelu = nn.ReLU(inplace=True) def forward(self, x): x = self.Dconv(x) x = self.Drelu(x) return x class stage_blockNew(nn.Module): """This class makes sure the paf and heatmap branch out in every stage""" def __init__(self, in_channels, out_channels): super(stage_blockNew, self).__init__() self.Dconv_1 = dilation_layer(in_channels, out_channels=64) self.Dconv_2 = dilation_layer(in_channels=64, out_channels=64) self.Dconv_3 = dilation_layer(in_channels=64, out_channels=64, dilation=2) self.Dconv_4 = dilation_layer(in_channels=64, out_channels=32, dilation=4) self.Dconv_5 = dilation_layer(in_channels=32, out_channels=32, dilation=8) self.Mconv_6 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=1, padding=0) 
self.Mrelu_6 = nn.ReLU(inplace=True) self.paf = nn.Conv2d(in_channels=128, out_channels=14, kernel_size= 1, padding=0) self.heatmap = nn.Conv2d(in_channels=128, out_channels=9, kernel_size=1, padding=0) def forward(self, input_0): primals_1 = self.Dconv_1.Dconv.weight primals_2 = self.Dconv_1.Dconv.bias primals_4 = self.Dconv_2.Dconv.weight primals_5 = self.Dconv_2.Dconv.bias primals_6 = self.Dconv_3.Dconv.weight primals_7 = self.Dconv_3.Dconv.bias primals_8 = self.Dconv_4.Dconv.weight primals_9 = self.Dconv_4.Dconv.bias primals_10 = self.Dconv_5.Dconv.weight primals_11 = self.Dconv_5.Dconv.bias primals_12 = self.Mconv_6.weight primals_13 = self.Mconv_6.bias primals_14 = self.paf.weight primals_15 = self.paf.bias primals_16 = self.heatmap.weight primals_17 = self.heatmap.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0], output[1]
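Because stage_blockNew reuses the submodule names of the eager stage_block, the two can plausibly be cross-checked by sharing a state_dict. A sketch under those assumptions, requiring a CUDA device (the compiled call path is CUDA-only) and both class definitions above in scope:

import torch

torch.manual_seed(0)
eager = stage_block(in_channels=4, out_channels=4).cuda()
compiled = stage_blockNew(in_channels=4, out_channels=4).cuda()
compiled.load_state_dict(eager.state_dict())     # identical parameter names
x = torch.rand(4, 4, 4, 4, device='cuda')
paf_e, hm_e = eager(x)
paf_c, hm_c = compiled(x)
assert torch.allclose(paf_e, paf_c, atol=1e-5)
assert torch.allclose(hm_e, hm_c, atol=1e-5)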
IrohXu/Infant-Pose-pytorch
stage_block
false
5374
[ "MIT" ]
1
148c43fbfefe06ec2fffa7055049c3ff341154f8
https://github.com/IrohXu/Infant-Pose-pytorch/tree/148c43fbfefe06ec2fffa7055049c3ff341154f8
import torch import torch.onnx import torch import torch.nn as nn import torch.utils.data class dilation_layer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, padding= 'same_padding', dilation=1): super().__init__() if padding == 'same_padding': padding = int((kernel_size - 1) / 2 * dilation) self.Dconv = nn.Conv2d(in_channels=in_channels, out_channels= out_channels, kernel_size=kernel_size, padding=padding, dilation=dilation) self.Drelu = nn.ReLU(inplace=True) def forward(self, x): x = self.Dconv(x) x = self.Drelu(x) return x class Model(nn.Module): """This class makes sure the paf and heatmap branch out in every stage""" def __init__(self, in_channels, out_channels): super().__init__() self.Dconv_1 = dilation_layer(in_channels, out_channels=64) self.Dconv_2 = dilation_layer(in_channels=64, out_channels=64) self.Dconv_3 = dilation_layer(in_channels=64, out_channels=64, dilation=2) self.Dconv_4 = dilation_layer(in_channels=64, out_channels=32, dilation=4) self.Dconv_5 = dilation_layer(in_channels=32, out_channels=32, dilation=8) self.Mconv_6 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=1, padding=0) self.Mrelu_6 = nn.ReLU(inplace=True) self.paf = nn.Conv2d(in_channels=128, out_channels=14, kernel_size= 1, padding=0) self.heatmap = nn.Conv2d(in_channels=128, out_channels=9, kernel_size=1, padding=0) def forward(self, x): x_1 = self.Dconv_1(x) x_2 = self.Dconv_2(x_1) x_3 = self.Dconv_3(x_2) x_4 = self.Dconv_4(x_3) x_5 = self.Dconv_5(x_4) x_cat = torch.cat([x_1, x_2, x_3, x_4, x_5], 1) x_out = self.Mconv_6(x_cat) x_out = self.Mrelu_6(x_out) paf = self.paf(x_out) heatmap = self.heatmap(x_out) return paf, heatmap def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
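The get_init_inputs()/get_inputs() pair above presumably feeds a test or benchmarking harness that is not shown in this dump. A hypothetical driver consistent with their return values (here get_init_inputs() returns positional args, unlike the [args, kwargs] convention in the earlier cell):

model = Model(*get_init_inputs())        # -> Model(4, 4)
paf, heatmap = model(*get_inputs())      # paf: [4, 14, 4, 4], heatmap: [4, 9, 4, 4]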
DBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2o/c2opq7c4lvr2nhpta2dhj6evsk2zsrhfnptkaibbd4osfzyvpegc.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 0.08333333333333333), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.08333333333333333 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/66/c66evdzvpsu44w6ectmpmlbqzklopjxx3ggre5phuka7p75i4q2u.py # Topologically Sorted Source Nodes: [mul_1, x], Original ATen: [aten.mul, aten.convolution] # Source node to ATen node mapping: # mul_1 => mul_1 # x => convolution # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %mul, %mul_1, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_mul_1 = async_compile.triton('triton_poi_fused_convolution_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zj/czjj2zy73m55ran2ff3nuktc52xycawxfyqucrwjcj57tcfs4hij.py # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # y_3 => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view_1, [0, 0, 2, 2, 2, 2, 0, 0], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x5 = xindex tmp0 = (-2) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-2) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-10) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tmp12 = tl.load(in_ptr1 + ((x5 // 64) % 4), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tl.store(out_ptr0 + (x5), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/c7/cc74ivrgyu3vj6ryuxe37a3atus7w5nepnjpgru2dwcm4dnn4nvv.py # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # y_9 => add, add_1, convert_element_type, convert_element_type_1, iota, mul_2, mul_3 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.25), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_3, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_3 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: 
'*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.25 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jx/cjxrnm4f5aj2ygxurte56263qfamds42noib4sliy2hpmdandkqg.py # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # y_9 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_3, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_4 = async_compile.triton('triton_poi_fused__unsafe_index_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + 
(x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 5, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (5*tmp4) + (25*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jj/cjj2nwkxsf7catfhjws72jgvwpk7b3ekscq2k6kcfsl3j536rhcn.py # Topologically Sorted Source Nodes: [y_19], Original ATen: [aten.convolution] # Source node to ATen node mapping: # y_19 => convolution_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_7, %primals_7, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x2 = (xindex // 36) x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/dh/cdhszrv52sdy5do26jx77mntq63tflytvejmy66lkwrnvel22zie.py # Topologically Sorted Source Nodes: [y_21], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # y_21 => _unsafe_index_1 # Graph fragment: # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%view_8, [None, None, %unsqueeze_1, %convert_element_type_6]), kwargs = {}) triton_poi_fused__unsafe_index_6 = 
async_compile.triton('triton_poi_fused__unsafe_index_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.75 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (3*tmp4) + (9*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/iv/civ44pwxmgwkfi7ga4llve5u5ts22o5z7p5dd7wi2mzt6ahiqfyb.py # Topologically Sorted Source Nodes: [mul_5], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_5 => mul_14 # Graph fragment: # %mul_14 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, 0.25), kwargs = {}) triton_poi_fused_mul_7 = async_compile.triton('triton_poi_fused_mul_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sd/csdf4e4q2pryhxo45xrd4mszzcfvlq2gbh2mr2ncc67ql6yzdm2q.py # Topologically Sorted Source Nodes: [mul_3, x1, out, wrapped_sqrt, out_1, mul_6, x1_1, out_2, out_3, add, wrapped_truediv, x_1], Original ATen: [aten.mul, aten.convolution, aten.leaky_relu, aten.sqrt, aten.add, aten.div, aten.leaky_relu_backward] # Source node to ATen node mapping: # add => add_8 # mul_3 => mul_7 # mul_6 => mul_15 # out => gt, mul_8, where # out_1 => mul_9 # out_2 => gt_1, mul_16, where_1 # out_3 => mul_17 # wrapped_sqrt => full_default # wrapped_truediv => full_default_2 # x1 => convolution_2 # x1_1 => convolution_4 # x_1 => mul_18 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, 1), kwargs = {}) # %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%view_4, %mul_6, %mul_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_2, %mul_8), kwargs = {}) # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 1.414213562373095), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %full_default), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, 1), kwargs = {}) # %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%view_9, %mul_14, %mul_15, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_4, %mul_16), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, %full_default), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_17), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7071067811865476), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 
%full_default_2), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {}) triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8 = async_compile.triton('triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x3), xmask) tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = 1.414213562373095 tmp9 = tmp7 * tmp8 tmp12 = tmp10 + tmp11 tmp13 = tmp12 > tmp3 tmp14 = tmp12 * tmp5 tmp15 = tl.where(tmp13, tmp12, tmp14) tmp16 = tmp15 * tmp8 tmp17 = tmp9 + tmp16 tmp18 = 0.7071067811865476 tmp19 = tmp17 * tmp18 tmp20 = tmp15 > tmp3 tmp21 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp19, xmask) tl.store(out_ptr1 + (x3), tmp20, xmask) tl.store(out_ptr2 + (x3), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4, ), (1, )) 
assert_size_stride(primals_7, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_3, buf0, 144, grid=grid(144), stream=stream0) del primals_3 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_1, x], Original ATen: [aten.mul, aten.convolution] triton_poi_fused_convolution_mul_1.run(primals_2, buf1, 4, grid=grid(4), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [mul_1, x], Original ATen: [aten.mul, aten.convolution] buf2 = extern_kernels.convolution(primals_1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((16, 8, 8, 1), (64, 8, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_2.run(buf2, buf1, buf3, 1024, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (16, 1, 8, 8), (64, 0, 8, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (16, 1, 5, 5), (25, 25, 5, 1)) buf5 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_3.run(buf5, 4, grid=grid(4), stream=stream0) buf6 = reinterpret_tensor(buf2, (16, 1, 4, 4), (16, 1, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [y_9], Original ATen: [aten._unsafe_index] triton_poi_fused__unsafe_index_4.run(buf5, buf4, buf6, 256, grid=grid(256), stream=stream0) del buf4 buf7 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2], Original ATen: [aten.mul] triton_poi_fused_mul_0.run(primals_5, buf7, 144, grid=grid(144), stream=stream0) del primals_5 buf8 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul_3, x1], Original ATen: [aten.mul, aten.convolution] triton_poi_fused_convolution_mul_1.run(primals_6, buf8, 4, grid=grid(4), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [mul_3, x1], Original ATen: [aten.mul, aten.convolution] buf9 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 2, 2), (16, 4, 2, 1)) buf10 = empty_strided_cuda((16, 1, 6, 6), (36, 6, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [y_19], Original ATen: [aten.convolution] triton_poi_fused_convolution_5.run(primals_1, buf10, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [y_19], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (16, 1, 3, 3), (9, 9, 3, 1)) del buf10 del primals_7 buf12 = 
empty_strided_cuda((16, 1, 4, 4), (16, 1, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_21], Original ATen: [aten._unsafe_index] triton_poi_fused__unsafe_index_6.run(buf11, buf12, 256, grid=grid(256), stream=stream0) del buf11 buf13 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_5], Original ATen: [aten.mul] triton_poi_fused_mul_7.run(primals_8, buf13, 16, grid=grid(16), stream=stream0) del primals_8 buf14 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_6, x1_1], Original ATen: [aten.mul, aten.convolution] triton_poi_fused_convolution_mul_1.run(primals_9, buf14, 4, grid=grid(4), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [mul_6, x1_1], Original ATen: [aten.mul, aten.convolution] buf15 = extern_kernels.convolution(reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf13, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 2, 2), (16, 4, 2, 1)) buf16 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf17 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) buf18 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [mul_3, x1, out, wrapped_sqrt, out_1, mul_6, x1_1, out_2, out_3, add, wrapped_truediv, x_1], Original ATen: [aten.mul, aten.convolution, aten.leaky_relu, aten.sqrt, aten.add, aten.div, aten.leaky_relu_backward] triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8.run(buf9, buf8, buf15, buf14, buf16, buf17, buf18, 64, grid=grid(64), stream=stream0) del buf14 del buf15 del buf8 del buf9 return (buf16, primals_1, primals_4, buf0, reinterpret_tensor(buf3, (16, 1, 8, 8), (64, 1, 8, 1), 0), buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf13, buf17, buf18, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
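The magic numbers in the fused kernel above come straight from the module's math: 0.2 is the leaky-ReLU slope, 1.414213562373095 is the np.sqrt(2) gain applied after each downsample branch, and 0.7071067811865476 is the 1/sqrt(2) residual scaling. A minimal eager-mode sketch of that fused epilogue (my own illustration, not part of the generated module):

import math

import torch
import torch.nn.functional as F


def fused_epilogue_reference(conv_out, bias, res_out, res_bias):
    # leaky_relu(x + b) * sqrt(2) on each branch, then combine with 1/sqrt(2),
    # mirroring tmp2..tmp19 in the kernel; the two boolean outputs it also
    # stores are just the (value > 0) masks kept for the leaky_relu backward.
    a = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), 0.2) * math.sqrt(2)
    b = F.leaky_relu(res_out + res_bias.view(1, -1, 1, 1), 0.2) * math.sqrt(2)
    return (a + b) / math.sqrt(2)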
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Conv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size, gain=1,
                 use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        assert kernel_size >= 1 and kernel_size % 2 == 1
        he_std = gain / (input_channels * output_channels * kernel_size *
                         kernel_size) ** 0.5
        self.kernel_size = kernel_size
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_lrmul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_lrmul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_channels,
            input_channels, kernel_size, kernel_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_channels))
            self.b_lrmul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        if self.bias is not None:
            return F.conv2d(x, self.weight * self.w_lrmul,
                self.bias * self.b_lrmul, padding=self.kernel_size // 2)
        else:
            return F.conv2d(x, self.weight * self.w_lrmul,
                padding=self.kernel_size // 2)


class ConvDownsample2d(nn.Module):

    def __init__(self, kernel_size, input_channels, output_channels, k=[1,
        3, 3, 1], factor=2, gain=1, use_wscale=True, lrmul=1, bias=True):
        """
        ConvDownsample2D method in D_stylegan2.
        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
                  (separable). The default is `[1] * factor`, which
                  corresponds to average pooling.
        :param factor: Integer downsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).
        Returns: Tensor of the shape `[N, C, H // factor, W // factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, \
            'factor must be larger than 1! (default: 2)'
        assert kernel_size >= 1 and kernel_size % 2 == 1
        he_std = gain / (input_channels * output_channels * kernel_size *
                         kernel_size) ** 0.5
        self.kernel_size = kernel_size
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_lrmul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_lrmul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_channels,
            input_channels, kernel_size, kernel_size) * init_std)
        self.convH, self.convW = self.weight.shape[2:]
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_channels))
            self.b_lrmul = lrmul
        else:
            self.bias = None
        self.gain = gain
        self.factor = factor
        self.k = _setup_kernel(k) * self.gain
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[-1] - self.factor + (self.convW - 1)
        self.padx0, self.pady0 = (self.p + 1) // 2, (self.p + 1) // 2
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]

    def forward(self, x):
        y = x.clone()
        y = y.reshape([-1, x.shape[2], x.shape[3], 1])
        inC, inH, inW = x.shape[1:]
        y = torch.reshape(y, (-1, inH, inW, 1))
        y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0),
                      max(self.padx0, 0), max(self.padx1, 0), 0, 0))
        y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0),
              max(-self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :]
        y = y.permute(0, 3, 1, 2)
        y = y.reshape(-1, 1, inH + self.pady0 + self.pady1,
                      inW + self.padx0 + self.padx1)
        y = F.conv2d(y, self.k)
        y = y.view(-1, 1, inH + self.pady0 + self.pady1 - self.kernelH + 1,
                   inW + self.padx0 + self.padx1 - self.kernelW + 1)
        if inH != y.shape[1]:
            y = F.interpolate(y, size=(inH, inW))
        y = y.permute(0, 2, 3, 1)
        y = y.reshape(-1, inC, inH, inW)
        x1 = F.conv2d(y, self.weight * self.w_lrmul,
                      self.bias * self.b_lrmul, stride=self.factor,
                      padding=self.convW // 2)
        out = F.leaky_relu(x1, 0.2, inplace=True)
        out = out * np.sqrt(2)
        return out


class DBlock(nn.Module):
    """D_stylegan2 Basic Block."""

    def __init__(self, in1, in2, out3, use_wscale=True, lrmul=1,
                 resample_kernel=[1, 3, 3, 1], architecture='resnet'):
        super().__init__()
        self.arch = architecture
        self.conv0 = Conv2d(input_channels=in1, output_channels=in2,
            kernel_size=3, use_wscale=use_wscale, lrmul=lrmul, bias=True)
        self.conv1_down = ConvDownsample2d(kernel_size=3,
            input_channels=in2, output_channels=out3, k=resample_kernel)
        self.res_conv2_down = ConvDownsample2d(kernel_size=1,
            input_channels=in1, output_channels=out3, k=resample_kernel)

    def forward(self, x):
        t = x.clone()
        x = self.conv0(x)
        x = self.conv1_down(x)
        if self.arch == 'resnet':
            t = self.res_conv2_down(t)
            x = (x + t) * (1 / np.sqrt(2))
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in1': 4, 'in2': 4, 'out3': 4}]
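To make the shapes in call() above less mysterious, here is the FIR setup worked through for the default k=[1, 3, 3, 1] (an illustrative recomputation of _setup_kernel and the padding arithmetic, not new functionality):

import numpy as np

k = np.outer([1, 3, 3, 1], [1, 3, 3, 1]).astype(np.float32)
k /= k.sum()  # 4x4 binomial low-pass filter; taps sum to 1 (divisor 64)

p = k.shape[-1] - 2 + (3 - 1)      # p = kW - factor + (convW - 1) = 4 for
                                   # the 3x3 conv path
pad0, pad1 = (p + 1) // 2, p // 2  # (2, 2): a 4x4 input is padded to 8x8,
                                   # matching buf3's (16, 1, 8, 8) view; the
                                   # 1x1 residual path gets p = 2 and the
                                   # 6x6 pad seen in buf10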
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.08333333333333333 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x5 = xindex tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -2 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-10 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tmp12 = tl.load(in_ptr1 + x5 // 64 % 4, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp13 = tmp11 + tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp10, tmp13, tmp14) tl.store(out_ptr0 + x5, tmp15, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 1.25 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 5, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 5 * tmp4 + 25 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x2 = xindex // 36 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0) tl.store(out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused__unsafe_index_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.75 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 3 * tmp4 + 9 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_mul_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8( in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x3, xmask) tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = 1.414213562373095 tmp9 = tmp7 * tmp8 tmp12 = tmp10 + tmp11 tmp13 = tmp12 > tmp3 tmp14 = tmp12 * tmp5 tmp15 = tl.where(tmp13, tmp12, tmp14) tmp16 = tmp15 * tmp8 tmp17 = tmp9 + tmp16 tmp18 = 0.7071067811865476 tmp19 = tmp17 * tmp18 tmp20 = tmp15 > tmp3 tmp21 = tmp7 > tmp3 tl.store(out_ptr0 + x3, tmp19, xmask) tl.store(out_ptr1 + x3, tmp20, xmask) tl.store(out_ptr2 + x3, tmp21, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(144)](primals_3, buf0, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_convolution_mul_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_2 buf2 = 
extern_kernels.convolution(primals_1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((16, 8, 8, 1), (64, 8, 1, 1), torch.float32) triton_poi_fused_constant_pad_nd_2[grid(1024)](buf2, buf1, buf3, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (16, 1, 8, 8), (64, 0, 8, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (16, 1, 5, 5), (25, 25, 5, 1)) buf5 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_3[grid(4)](buf5, 4, XBLOCK =4, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf2, (16, 1, 4, 4), (16, 1, 4, 1), 0) del buf2 triton_poi_fused__unsafe_index_4[grid(256)](buf5, buf4, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf7 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_mul_0[grid(144)](primals_5, buf7, 144, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = buf1 del buf1 triton_poi_fused_convolution_mul_1[grid(4)](primals_6, buf8, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_6 buf9 = extern_kernels.convolution(reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 2, 2), (16, 4, 2, 1)) buf10 = empty_strided_cuda((16, 1, 6, 6), (36, 6, 6, 1), torch.float32) triton_poi_fused_convolution_5[grid(576)](primals_1, buf10, 576, XBLOCK=256, num_warps=4, num_stages=1) buf11 = extern_kernels.convolution(buf10, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (16, 1, 3, 3), (9, 9, 3, 1)) del buf10 del primals_7 buf12 = empty_strided_cuda((16, 1, 4, 4), (16, 1, 4, 1), torch.float32) triton_poi_fused__unsafe_index_6[grid(256)](buf11, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf11 buf13 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_mul_7[grid(16)](primals_8, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf14 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_convolution_mul_1[grid(4)](primals_9, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_9 buf15 = extern_kernels.convolution(reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf13, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 4, 2, 2), (16, 4, 2, 1)) buf16 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf17 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) buf18 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool) triton_poi_fused_add_convolution_div_leaky_relu_leaky_relu_backward_mul_sqrt_8[ grid(64)](buf9, buf8, buf15, buf14, buf16, buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del buf15 del buf8 del buf9 return buf16, primals_1, primals_4, buf0, reinterpret_tensor(buf3, (16, 1, 8, 8), (64, 1, 8, 1), 0), buf5, reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf13, buf17, buf18 def _setup_kernel(k): k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = 
np.outer(k, k) k /= np.sum(k) assert k.ndim == 2 assert k.shape[0] == k.shape[1] return k class Conv2d(nn.Module): def __init__(self, input_channels, output_channels, kernel_size, gain=1, use_wscale=False, lrmul=1, bias=True): super().__init__() assert kernel_size >= 1 and kernel_size % 2 == 1 he_std = gain / (input_channels * output_channels * kernel_size * kernel_size) ** 0.5 self.kernel_size = kernel_size if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std) if bias: self.bias = torch.nn.Parameter(torch.zeros(output_channels)) self.b_lrmul = lrmul else: self.bias = None def forward(self, x): if self.bias is not None: return F.conv2d(x, self.weight * self.w_lrmul, self.bias * self .b_lrmul, padding=self.kernel_size // 2) else: return F.conv2d(x, self.weight * self.w_lrmul, padding=self. kernel_size // 2) class ConvDownsample2d(nn.Module): def __init__(self, kernel_size, input_channels, output_channels, k=[1, 3, 3, 1], factor=2, gain=1, use_wscale=True, lrmul=1, bias=True): """ ConvDownsample2D method in D_stylegan2. :param k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. :param factor: Integer downsampling factor (default: 2). :param gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H // factor, W // factor]` """ super().__init__() assert isinstance(factor, int ) and factor >= 1, 'factor must be larger than 1! (default: 2)' assert kernel_size >= 1 and kernel_size % 2 == 1 he_std = gain / (input_channels * output_channels * kernel_size * kernel_size) ** 0.5 self.kernel_size = kernel_size if use_wscale: init_std = 1.0 / lrmul self.w_lrmul = he_std * lrmul else: init_std = he_std / lrmul self.w_lrmul = lrmul self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std) self.convH, self.convW = self.weight.shape[2:] if bias: self.bias = torch.nn.Parameter(torch.zeros(output_channels)) self.b_lrmul = lrmul else: self.bias = None self.gain = gain self.factor = factor self.k = _setup_kernel(k) * self.gain self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0) self.k = nn.Parameter(self.k, requires_grad=False) self.p = self.k.shape[-1] - self.factor + (self.convW - 1) self.padx0, self.pady0 = (self.p + 1) // 2, (self.p + 1) // 2 self.padx1, self.pady1 = self.p // 2, self.p // 2 self.kernelH, self.kernelW = self.k.shape[2:] def forward(self, x): y = x.clone() y = y.reshape([-1, x.shape[2], x.shape[3], 1]) inC, inH, inW = x.shape[1:] y = torch.reshape(y, (-1, inH, inW, 1)) y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0), max( self.padx0, 0), max(self.padx1, 0), 0, 0)) y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0), max( -self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :] y = y.permute(0, 3, 1, 2) y = y.reshape(-1, 1, inH + self.pady0 + self.pady1, inW + self. padx0 + self.padx1) y = F.conv2d(y, self.k) y = y.view(-1, 1, inH + self.pady0 + self.pady1 - self.kernelH + 1, inW + self.padx0 + self.padx1 - self.kernelW + 1) if inH != y.shape[1]: y = F.interpolate(y, size=(inH, inW)) y = y.permute(0, 2, 3, 1) y = y.reshape(-1, inC, inH, inW) x1 = F.conv2d(y, self.weight * self.w_lrmul, self.bias * self. 
b_lrmul, stride=self.factor, padding=self.convW // 2) out = F.leaky_relu(x1, 0.2, inplace=True) out = out * np.sqrt(2) return out class DBlockNew(nn.Module): """ D_stylegan2 Basic Block. """ def __init__(self, in1, in2, out3, use_wscale=True, lrmul=1, resample_kernel=[1, 3, 3, 1], architecture='resnet'): super().__init__() self.arch = architecture self.conv0 = Conv2d(input_channels=in1, output_channels=in2, kernel_size=3, use_wscale=use_wscale, lrmul=lrmul, bias=True) self.conv1_down = ConvDownsample2d(kernel_size=3, input_channels= in2, output_channels=out3, k=resample_kernel) self.res_conv2_down = ConvDownsample2d(kernel_size=1, input_channels=in1, output_channels=out3, k=resample_kernel) def forward(self, input_0): primals_3 = self.conv0.weight primals_2 = self.conv0.bias primals_5 = self.conv1_down.weight primals_6 = self.conv1_down.bias primals_4 = self.conv1_down.k primals_8 = self.res_conv2_down.weight primals_9 = self.res_conv2_down.bias primals_7 = self.res_conv2_down.k primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
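A hypothetical smoke test for the compiled wrapper, with shapes taken from get_inputs() (a CUDA device is required, since call() pins all buffers to cuda:0):

import torch

block = DBlockNew(in1=4, in2=4, out3=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    y = block(x)
print(y.shape)  # torch.Size([4, 4, 2, 2]): spatial dims halved by factor 2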
Iceland-Leo/StyleGAN2_PyTorch
DBlock
false
5375
[ "MIT" ]
1
3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
https://github.com/Iceland-Leo/StyleGAN2_PyTorch/tree/3621f5e4ba1c7fde7e2fae1f4700d050656a0b02
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


def _setup_kernel(k):
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    assert k.ndim == 2
    assert k.shape[0] == k.shape[1]
    return k


class Conv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size, gain=1,
                 use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        assert kernel_size >= 1 and kernel_size % 2 == 1
        he_std = gain / (input_channels * output_channels * kernel_size *
                         kernel_size) ** 0.5
        self.kernel_size = kernel_size
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_lrmul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_lrmul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_channels,
            input_channels, kernel_size, kernel_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_channels))
            self.b_lrmul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        if self.bias is not None:
            return F.conv2d(x, self.weight * self.w_lrmul,
                self.bias * self.b_lrmul, padding=self.kernel_size // 2)
        else:
            return F.conv2d(x, self.weight * self.w_lrmul,
                padding=self.kernel_size // 2)


class ConvDownsample2d(nn.Module):

    def __init__(self, kernel_size, input_channels, output_channels, k=[1,
        3, 3, 1], factor=2, gain=1, use_wscale=True, lrmul=1, bias=True):
        """
        ConvDownsample2D method in D_stylegan2.
        :param k: FIR filter of the shape `[firH, firW]` or `[firN]`
                  (separable). The default is `[1] * factor`, which
                  corresponds to average pooling.
        :param factor: Integer downsampling factor (default: 2).
        :param gain: Scaling factor for signal magnitude (default: 1.0).
        Returns: Tensor of the shape `[N, C, H // factor, W // factor]`
        """
        super().__init__()
        assert isinstance(factor, int) and factor >= 1, \
            'factor must be larger than 1! (default: 2)'
        assert kernel_size >= 1 and kernel_size % 2 == 1
        he_std = gain / (input_channels * output_channels * kernel_size *
                         kernel_size) ** 0.5
        self.kernel_size = kernel_size
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_lrmul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_lrmul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_channels,
            input_channels, kernel_size, kernel_size) * init_std)
        self.convH, self.convW = self.weight.shape[2:]
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_channels))
            self.b_lrmul = lrmul
        else:
            self.bias = None
        self.gain = gain
        self.factor = factor
        self.k = _setup_kernel(k) * self.gain
        self.k = torch.FloatTensor(self.k).unsqueeze(0).unsqueeze(0)
        self.k = nn.Parameter(self.k, requires_grad=False)
        self.p = self.k.shape[-1] - self.factor + (self.convW - 1)
        self.padx0, self.pady0 = (self.p + 1) // 2, (self.p + 1) // 2
        self.padx1, self.pady1 = self.p // 2, self.p // 2
        self.kernelH, self.kernelW = self.k.shape[2:]

    def forward(self, x):
        y = x.clone()
        y = y.reshape([-1, x.shape[2], x.shape[3], 1])
        inC, inH, inW = x.shape[1:]
        y = torch.reshape(y, (-1, inH, inW, 1))
        y = F.pad(y, (0, 0, max(self.pady0, 0), max(self.pady1, 0),
                      max(self.padx0, 0), max(self.padx1, 0), 0, 0))
        y = y[:, max(-self.pady0, 0):y.shape[1] - max(-self.pady1, 0),
              max(-self.padx0, 0):y.shape[2] - max(-self.padx1, 0), :]
        y = y.permute(0, 3, 1, 2)
        y = y.reshape(-1,
# ... truncated (>4000 chars) for memory efficiency
CameraPriorLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/k5/ck5iyslvv6yycowofu4466czfuna7ohrwjyq6l5tp4w7m6m6y6wu.py # Topologically Sorted Source Nodes: [neg, mul, camera_prior_loss, pow_1, camera_prior_loss_1, camera_prior_loss_2], Original ATen: [aten.neg, aten.mul, aten.exp, aten.pow, aten.mean] # Source node to ATen node mapping: # camera_prior_loss => exp # camera_prior_loss_1 => mul_1 # camera_prior_loss_2 => mean # mul => mul # neg => neg # pow_1 => pow_1 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, 10), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%exp, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_exp_mean_mul_neg_pow_0 = async_compile.triton('triton_per_fused_exp_mean_mul_neg_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_exp_mean_mul_neg_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_exp_mean_mul_neg_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = -tmp0 tmp2 = 10.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp4 * tmp4 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tmp11 = 64.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, mul, camera_prior_loss, pow_1, camera_prior_loss_1, camera_prior_loss_2], Original ATen: [aten.neg, aten.mul, aten.exp, aten.pow, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_exp_mean_mul_neg_pow_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.cuda.comm


class CameraPriorLoss(nn.Module):
    """Prior loss for predicted camera.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        scale (float, optional): The scale coefficient for regularizing
            camera parameters. Defaults to 10
        loss_weight (float, optional): The weight of the loss.
            Defaults to 1.0
    """

    def __init__(self, scale=10, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.scale = scale
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, cameras, loss_weight_override=None,
                reduction_override=None):
        """Forward function of loss.

        Args:
            cameras (torch.Tensor): The predicted camera parameters
            loss_weight_override (float, optional): The weight of loss used
                to override the original weight of loss
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else
                     self.reduction)
        loss_weight = (loss_weight_override if loss_weight_override is not
                       None else self.loss_weight)
        camera_prior_loss = torch.exp(-cameras[:, 0] * self.scale)
        camera_prior_loss = torch.pow(camera_prior_loss, 2) * loss_weight
        if reduction == 'mean':
            camera_prior_loss = camera_prior_loss.mean()
        elif reduction == 'sum':
            camera_prior_loss = camera_prior_loss.sum()
        return camera_prior_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
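Since squaring an exponential just doubles the exponent, the default loss collapses to mean(exp(-2 * scale * s)) over the scale channel; a quick sanity check (my addition, using the shapes from get_inputs()):

import torch

cameras = torch.rand(4, 4, 4, 4)
s = cameras[:, 0]  # 4*4*4 = 64 elements, matching rnumel = 64 in the kernel
loss = torch.exp(-s * 10).pow(2).mean()
assert torch.allclose(loss, torch.exp(-20.0 * s).mean())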
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda.comm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_exp_mean_mul_neg_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = -tmp0 tmp2 = 10.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp4 * tmp4 tmp6 = 1.0 tmp7 = tmp5 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.sum(tmp8, 1)[:, None] tmp11 = 64.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_exp_mean_mul_neg_pow_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class CameraPriorLossNew(nn.Module): """Prior loss for predicted camera. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". scale (float, optional): The scale coefficient for regularizing camera parameters. Defaults to 10 loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, scale=10, reduction='mean', loss_weight=1.0): super().__init__() self.scale = scale self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JasonBoy1/mmhuman3d
CameraPriorLoss
false
5376
[ "Apache-2.0" ]
1
79b2665191115f3ed905e6afdf09990a8d484362
https://github.com/JasonBoy1/mmhuman3d/tree/79b2665191115f3ed905e6afdf09990a8d484362
import torch
import torch.nn as nn
import torch.cuda.comm


class Model(nn.Module):
    """Prior loss for predicted camera.

    Args:
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        scale (float, optional): The scale coefficient for regularizing
            camera parameters. Defaults to 10
        loss_weight (float, optional): The weight of the loss.
            Defaults to 1.0
    """

    def __init__(self, scale=10, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.scale = scale
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, cameras, loss_weight_override=None,
                reduction_override=None):
        """Forward function of loss.

        Args:
            cameras (torch.Tensor): The predicted camera parameters
            loss_weight_override (float, optional): The weight of loss used
                to override the original weight of loss
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None

        Returns:
            torch.Tensor: The calculated loss
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else
                     self.reduction)
        loss_weight = (loss_weight_override if loss_weight_override is not
                       None else self.loss_weight)
        camera_prior_loss = torch.exp(-cameras[:, 0] * self.scale)
        camera_prior_loss = torch.pow(camera_prior_loss, 2) * loss_weight
        if reduction == 'mean':
            camera_prior_loss = camera_prior_loss.mean()
        elif reduction == 'sum':
            camera_prior_loss = camera_prior_loss.sum()
        return camera_prior_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
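The only non-trivial part of the Triton kernel for this record is the addressing: slicing cameras[:, 0] out of a (4, 4, 4, 4) tensor with strides (64, 16, 4, 1) keeps exactly the offsets r0 + 64 * r1 loaded above. A small check of that claim, assuming those shapes:

import torch

x = torch.rand(4, 4, 4, 4)
flat = x.reshape(-1)
offsets = [r0 + 64 * r1 for r1 in range(4) for r0 in range(16)]
assert torch.equal(flat[offsets], x[:, 0].reshape(-1))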
LearnedPositionalEncoding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/up/cuphmudfbm5xmuymbvibhn5q6e5y55prj6knqkmj43mx7ovcvcgz.py # Topologically Sorted Source Nodes: [pos], Original ATen: [aten.arange] # Source node to ATen node mapping: # pos => iota # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) triton_poi_fused_arange_0 = async_compile.triton('triton_poi_fused_arange_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7g/c7ggkspjiryu6lrnyqmt3ltk6cvy3xv7hlniyify4whtbxpu3vcz.py # Topologically Sorted 
Source Nodes: [embedding], Original ATen: [aten.embedding] # Source node to ATen node mapping: # embedding => embedding # Graph fragment: # %embedding : [num_users=2] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %expand), kwargs = {}) triton_poi_fused_embedding_1 = async_compile.triton('triton_poi_fused_embedding_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 16) x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 100, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 100)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 100") tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask) tl.store(out_ptr0 + (x4), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/y3/cy35gsxm3xa2mi4ahfwlz6f5exunl2ig4pka6onlgazxeb4uuhe3.py # Topologically Sorted Source Nodes: [x, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # pow_1 => pow_1 # s => mean_1 # sub => sub # u => mean # x => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) triton_poi_fused_add_mean_pow_sub_2 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_pow_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hy/chysp3ia3h42md5xyhzqsgskl34zzc6iy7yp2pqz3zla4xgfsa47.py # Topologically Sorted Source Nodes: [x, u, sub, add_1, sqrt, x_1, mul, add_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # mul => mul # sqrt => sqrt # sub => sub # u => mean # x => add # x_1 => div # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 
1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_4), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_3 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (100, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [pos], Original ATen: [aten.arange] stream0 = get_raw_stream(0) 
triton_poi_fused_arange_0.run(buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding] triton_poi_fused_embedding_1.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [x, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] triton_poi_fused_add_mean_pow_sub_2.run(primals_1, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, u, sub, add_1, sqrt, x_1, mul, add_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_sqrt_sub_3.run(primals_3, primals_1, buf1, buf2, buf3, primals_4, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_4 return (buf4, primals_1, primals_3, reinterpret_tensor(buf0, (4, 1), (1, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class LayerNorm(nn.Module):
    """A layernorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, d_model, variance_epsilon=1e-12):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.variance_epsilon = variance_epsilon

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.gamma * x + self.beta


class LearnedPositionalEncoding(nn.Module):

    def __init__(self, d_model, dropout=0.1, max_len=100):
        super(LearnedPositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.pos_embed = nn.Embedding(max_len, d_model)
        self.layernorm = LayerNorm(d_model)

    def forward(self, x):
        seq_len = x.size(0)
        pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
        pos = pos.unsqueeze(-1).expand(x.size()[:2])
        x = x + self.pos_embed(pos)
        return self.dropout(self.layernorm(x))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4}]
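For reference, this LayerNorm computes gamma * (x - mean) / sqrt(var + eps) + beta with a biased variance and the epsilon inside the square root, so at initialization it should agree with torch.nn.LayerNorm given the same eps. A quick check I added (not from the repo):

import torch

ln = LayerNorm(4)
ref = torch.nn.LayerNorm(4, eps=1e-12)
x = torch.rand(4, 4, 4, 4)
assert torch.allclose(ln(x), ref(x), atol=1e-6)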
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 100, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 100) | ~xmask, 'index out of bounds: 0 <= tmp4 < 100') tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask) tl.store(out_ptr0 + x4, tmp6, xmask) @triton.jit def triton_poi_fused_add_mean_pow_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x2, tmp16, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 
= tmp11 + tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (100, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.int64) get_raw_stream(0) triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_embedding_1[grid(64)](buf0, primals_2, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_add_mean_pow_sub_2[grid(64)](primals_1, buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_3[grid(256)](primals_3, primals_1, buf1, buf2, buf3, primals_4, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_4 return buf4, primals_1, primals_3, reinterpret_tensor(buf0, (4, 1), (1, 1), 0), buf1 class LayerNorm(nn.Module): """A layernorm module in the TF style (epsilon inside the square root).""" def __init__(self, d_model, variance_epsilon=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class LearnedPositionalEncodingNew(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=100): super(LearnedPositionalEncodingNew, self).__init__() self.dropout = nn.Dropout(p=dropout) self.pos_embed = nn.Embedding(max_len, d_model) self.layernorm = LayerNorm(d_model) def forward(self, input_0): primals_2 = self.pos_embed.weight primals_3 = self.layernorm.gamma primals_4 = self.layernorm.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
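For reference: the TF-style LayerNorm defined above keeps epsilon inside the square root and uses the biased variance, which is exactly what torch.nn.LayerNorm computes. A minimal added check (not part of the source entry; assumes the LayerNorm class above is in scope):

import torch

tf_ln = LayerNorm(4)                       # TF-style module from this entry
ref_ln = torch.nn.LayerNorm(4, eps=1e-12)  # same epsilon placement, biased variance
x = torch.rand(2, 3, 4)
# both initialize the scale to ones and the shift to zeros, so outputs should match
torch.testing.assert_close(tf_ln(x), ref_ln(x), rtol=1e-5, atol=1e-6)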
JamesNgo3781/vietocr
LearnedPositionalEncoding
false
5377
[ "Apache-2.0" ]
1
9d311bbeb18c51c8ff90022f07c0463b204407dc
https://github.com/JamesNgo3781/vietocr/tree/9d311bbeb18c51c8ff90022f07c0463b204407dc
import torch from torch import nn class LayerNorm(nn.Module): """A layernorm module in the TF style (epsilon inside the square root).""" def __init__(self, d_model, variance_epsilon=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(d_model)) self.beta = nn.Parameter(torch.zeros(d_model)) self.variance_epsilon = variance_epsilon def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class Model(nn.Module): def __init__(self, d_model, dropout=0.1, max_len=100): super().__init__() self.dropout = nn.Dropout(p=dropout) self.pos_embed = nn.Embedding(max_len, d_model) self.layernorm = LayerNorm(d_model) def forward(self, x): seq_len = x.size(0) pos = torch.arange(seq_len, dtype=torch.long, device=x.device) pos = pos.unsqueeze(-1).expand(x.size()[:2]) x = x + self.pos_embed(pos) return self.dropout(self.layernorm(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
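A hedged smoke test (added; assumes a CUDA device, since call() launches Triton kernels): with shared weights and dropout disabled via eval(), the eager Model above and the Triton-backed LearnedPositionalEncodingNew should agree, because the fused path never applies dropout and the eager dropout is the identity in eval mode.

import torch

eager = Model(4).cuda().eval()                        # dropout is identity in eval()
fused = LearnedPositionalEncodingNew(4).cuda().eval()
# share parameters so both modules compute the same function
fused.pos_embed.load_state_dict(eager.pos_embed.state_dict())
fused.layernorm.load_state_dict(eager.layernorm.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')             # the shape the kernels were traced for
torch.testing.assert_close(eager(x), fused(x), rtol=1e-4, atol=1e-5)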
L1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/i5/ci5r22vnwphjxav3oibgww4fkm25q4egp3rofzniyjru2u4b563f.py # Topologically Sorted Source Nodes: [sub, loss, loss_1, loss_2], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => abs_1 # loss_1 => mean # loss_2 => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, loss, loss_1, loss_2], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools import torch import torch.nn as nn import torch.cuda.comm from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss class L1Loss(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(L1Loss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. 
""" assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss = self.loss_weight * l1_loss(pred, target, weight, reduction= reduction, avg_factor=avg_factor) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import functools import torch.nn as nn import torch.cuda.comm from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) 
>>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss class L1LossNew(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super(L1LossNew, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
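A quick added sanity check (assumes CUDA; not from the source): for the default mean reduction and loss_weight=1.0, the fused kernel above computes mean(|pred - target|) * 1.0, so L1LossNew should match torch.nn.functional.l1_loss up to floating-point reduction order.

import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
fused = L1LossNew()(pred, target)
torch.testing.assert_close(fused, F.l1_loss(pred, target), rtol=1e-5, atol=1e-6)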
JasonBoy1/mmhuman3d
L1Loss
false
5378
[ "Apache-2.0" ]
1
79b2665191115f3ed905e6afdf09990a8d484362
https://github.com/JasonBoy1/mmhuman3d/tree/79b2665191115f3ed905e6afdf09990a8d484362
import functools import torch import torch.nn as nn import torch.cuda.comm from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Avarage factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def l1_loss(pred, target): """L1 loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. Returns: torch.Tensor: Calculated loss """ assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss class Model(nn.Module): """L1 loss. Args: reduction (str, optional): The method to reduce the loss. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of loss. """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to overr # ... truncated (>4000 chars) for memory efficiency
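The doctest in the weighted_loss docstring above can be run directly; here is a small added demonstration of the decorated l1_loss semantics (element-wise weighting happens before reduction, and avg_factor replaces the element count in the mean):

import torch

pred = torch.tensor([0.0, 2.0, 3.0])
target = torch.tensor([1.0, 1.0, 1.0])
weight = torch.tensor([1.0, 0.0, 1.0])
print(l1_loss(pred, target))                        # tensor(1.3333) = mean([1, 1, 2])
print(l1_loss(pred, target, weight))                # tensor(1.)     = mean([1, 0, 2])
print(l1_loss(pred, target, reduction='none'))      # tensor([1., 1., 2.])
print(l1_loss(pred, target, weight, avg_factor=2))  # tensor(1.5000) = sum([1, 0, 2]) / 2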
MediatorNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/d4/cd4rmmsd46lha6l4ut5a6nmc4wf52z3irm2fxiokbhgjhmlh75hj.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 12 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 12), (12, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (2, 12), (12, 1)) assert_size_stride(primals_7, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 768, grid=grid(768), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(primals_4, (12, 12), (1, 12), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 768, grid=grid(768), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 12), (12, 1), 0), reinterpret_tensor(primals_6, (12, 2), (1, 12), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(buf3, (64, 12), (12, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MediatorNet(nn.Module): def __init__(self, input_dim): super(MediatorNet, self).__init__() self.fc1 = nn.Linear(input_dim, input_dim * 3) self.fc2 = nn.Linear(input_dim * 3, input_dim * 3) self.fc_last = nn.Linear(input_dim * 3, 2) self.relu = nn.ReLU() def forward(self, x): x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.relu(x) x = self.fc_last(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (12, 12), (12, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (2, 12), (12, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(768)](buf1, primals_2, buf6, 768, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor(primals_4, (12, 12), (1, 12), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 12), (192, 48, 12, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 12), (192, 48, 12, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(768)](buf3, primals_5, buf5, 768, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 12), (12, 1), 0), reinterpret_tensor(primals_6, (12, 2), (1, 12), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 12), (12, 1), 0), reinterpret_tensor( buf3, (64, 12), (12, 1), 0), primals_6, buf5, primals_4, buf6 class MediatorNetNew(nn.Module): def __init__(self, input_dim): super(MediatorNetNew, self).__init__() self.fc1 = nn.Linear(input_dim, input_dim * 3) self.fc2 = nn.Linear(input_dim * 3, input_dim * 3) self.fc_last = nn.Linear(input_dim * 3, 2) self.relu = nn.ReLU() def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc_last.weight primals_7 = self.fc_last.bias primals_3 = input_0 output = call([primals_1, 
primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
JasonZuu/Frame-Selection
MediatorNet
false
5379
[ "BSD-3-Clause" ]
1
3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
https://github.com/JasonZuu/Frame-Selection/tree/3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, input_dim): super().__init__() self.fc1 = nn.Linear(input_dim, input_dim * 3) self.fc2 = nn.Linear(input_dim * 3, input_dim * 3) self.fc_last = nn.Linear(input_dim * 3, 2) self.relu = nn.ReLU() def forward(self, x): x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.relu(x) x = self.fc_last(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
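Added shape note (illustrative only): nn.Linear acts on the last dimension, which is why the compiled call() above can flatten the (4, 4, 4, 4) input to (64, 4) for the matmuls and reinterpret the result back. A minimal eager check:

import torch

net = Model(4).eval()             # eager three-layer MLP from this entry
x = torch.rand(4, 4, 4, 4)
out = net(x)
assert out.shape == (4, 4, 4, 2)  # fc_last maps input_dim * 3 = 12 features to 2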
HSigmoid
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py # Topologically Sorted Source Nodes: [add, relu6, out], Original ATen: [aten.add, aten.hardtanh, aten.div] # Source node to ATen node mapping: # add => add # out => div # relu6 => clamp_max, clamp_min # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {}) triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, relu6, out], Original ATen: [aten.add, aten.hardtanh, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class HSigmoid(nn.Module): def forward(self, x): out = F.relu6(x + 3, inplace=True) / 6 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class HSigmoidNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JasonZuu/Frame-Selection
HSigmoid
false
5380
[ "BSD-3-Clause" ]
1
3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
https://github.com/JasonZuu/Frame-Selection/tree/3eb6ecdbf8e5695ba53752bdd8446def9c5cfbb9
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def forward(self, x): out = F.relu6(x + 3, inplace=True) / 6 return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
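A tiny added numeric check: the hard sigmoid above clamps x + 3 to [0, 6] and divides by 6, giving 0 for x <= -3, 1 for x >= 3, and a linear ramp in between.

import torch

m = Model()
x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
print(m(x))  # tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])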
SmoothTranslationLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/si/csi7zswrgkt5rqoak2nctfstk2b7vqihfvbcs5nlpzurs2rijsjs.py # Topologically Sorted Source Nodes: [cat, smooth_translation_loss_1, smooth_translation_loss_2, smooth_translation_loss_3, smooth_translation_loss_4], Original ATen: [aten.cat, aten.sum, aten.mul, aten.mean] # Source node to ATen node mapping: # cat => cat # smooth_translation_loss_1 => sum_2 # smooth_translation_loss_2 => mul # smooth_translation_loss_3 => mul_1 # smooth_translation_loss_4 => mean # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%full_default, %sum_1],), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%cat, [-1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 1000.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 1.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused_cat_mean_mul_sum_0 = async_compile.triton('triton_per_fused_cat_mean_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = (rindex // 16) r0 = rindex % 16 r2 = rindex tmp0 = r1 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 0.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tmp9 = tl.full([1, 1], 4, tl.int64) tmp10 = tmp0 < tmp9 tmp11 = tl.load(in_ptr0 + (tl.broadcast_to(64 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr0 + (tl.broadcast_to((4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tl.load(in_ptr0 + (tl.broadcast_to(65 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr0 + (tl.broadcast_to(1 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 - tmp16 tmp18 = tl_math.abs(tmp17) tmp19 = tmp14 + tmp18 tmp20 = tl.load(in_ptr0 + (tl.broadcast_to(66 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp21 = tl.load(in_ptr0 + (tl.broadcast_to(2 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp22 = tmp20 - tmp21 tmp23 = tl_math.abs(tmp22) tmp24 = tmp19 + tmp23 tmp25 = tl.load(in_ptr0 + (tl.broadcast_to(67 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr0 + (tl.broadcast_to(3 + (4*r0) + (64*((-1) + r1)), [XBLOCK, RBLOCK])), tmp8, eviction_policy='evict_last', other=0.0) tmp27 = tmp25 - tmp26 tmp28 = tl_math.abs(tmp27) tmp29 = tmp24 + tmp28 tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp8, tmp29, tmp30) tmp32 = tl.where(tmp4, tmp7, tmp31) tmp33 = 1000.0 tmp34 = tmp32 * tmp33 tmp35 = 1.0 tmp36 = tmp34 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 64.0 tmp41 = tmp39 / tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp41, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [cat, smooth_translation_loss_1, smooth_translation_loss_2, smooth_translation_loss_3, smooth_translation_loss_4], Original ATen: [aten.cat, aten.sum, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_cat_mean_mul_sum_0.run(buf2, arg0_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 return (buf2, ) 
def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.cuda.comm class SmoothTranslationLoss(nn.Module): """Smooth loss for translations. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, translation, loss_weight_override=None, reduction_override=None): """Forward function of loss. Args: translation (torch.Tensor): The body translation parameters loss_weight_override (float, optional): The weight of loss used to override the original weight of loss reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_weight = (loss_weight_override if loss_weight_override is not None else self.loss_weight) translation_diff = translation[1:] - translation[:-1] smooth_translation_loss = translation_diff.abs().sum(dim=-1, keepdim=True) smooth_translation_loss = torch.cat([torch.zeros_like( smooth_translation_loss)[:1], smooth_translation_loss]).sum(dim=-1) smooth_translation_loss *= 1000.0 smooth_translation_loss = loss_weight * smooth_translation_loss if reduction == 'mean': smooth_translation_loss = smooth_translation_loss.mean() elif reduction == 'sum': smooth_translation_loss = smooth_translation_loss.sum() return smooth_translation_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.cuda.comm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_cat_mean_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex // 16 r0 = rindex % 16 tmp0 = r1 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 0.0 tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype) tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = tmp0 >= tmp3 tl.full([1, 1], 4, tl.int64) tmp11 = tl.load(in_ptr0 + tl.broadcast_to(64 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp12 = tl.load(in_ptr0 + tl.broadcast_to(4 * r0 + 64 * (-1 + r1), [ XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp13 = tmp11 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tl.load(in_ptr0 + tl.broadcast_to(65 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp16 = tl.load(in_ptr0 + tl.broadcast_to(1 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 - tmp16 tmp18 = tl_math.abs(tmp17) tmp19 = tmp14 + tmp18 tmp20 = tl.load(in_ptr0 + tl.broadcast_to(66 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp21 = tl.load(in_ptr0 + tl.broadcast_to(2 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp22 = tmp20 - tmp21 tmp23 = tl_math.abs(tmp22) tmp24 = tmp19 + tmp23 tmp25 = tl.load(in_ptr0 + tl.broadcast_to(67 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp26 = tl.load(in_ptr0 + tl.broadcast_to(3 + 4 * r0 + 64 * (-1 + r1), [XBLOCK, RBLOCK]), tmp8, eviction_policy='evict_last', other=0.0) tmp27 = tmp25 - tmp26 tmp28 = tl_math.abs(tmp27) tmp29 = tmp24 + tmp28 tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype) tmp31 = tl.where(tmp8, tmp29, tmp30) tmp32 = tl.where(tmp4, tmp7, tmp31) tmp33 = 1000.0 tmp34 = tmp32 * tmp33 tmp35 = 1.0 tmp36 = tmp34 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 64.0 tmp41 = tmp39 / tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_cat_mean_mul_sum_0[grid(1)](buf2, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class SmoothTranslationLossNew(nn.Module): """Smooth loss for translations. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JasonBoy1/mmhuman3d
SmoothTranslationLoss
false
5381
[ "Apache-2.0" ]
1
79b2665191115f3ed905e6afdf09990a8d484362
https://github.com/JasonBoy1/mmhuman3d/tree/79b2665191115f3ed905e6afdf09990a8d484362
import torch import torch.nn as nn import torch.cuda.comm class Model(nn.Module): """Smooth loss for translations. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, translation, loss_weight_override=None, reduction_override=None): """Forward function of loss. Args: translation (torch.Tensor): The body translation parameters loss_weight_override (float, optional): The weight of loss used to override the original weight of loss reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) loss_weight = (loss_weight_override if loss_weight_override is not None else self.loss_weight) translation_diff = translation[1:] - translation[:-1] smooth_translation_loss = translation_diff.abs().sum(dim=-1, keepdim=True) smooth_translation_loss = torch.cat([torch.zeros_like( smooth_translation_loss)[:1], smooth_translation_loss]).sum(dim=-1) smooth_translation_loss *= 1000.0 smooth_translation_loss = loss_weight * smooth_translation_loss if reduction == 'mean': smooth_translation_loss = smooth_translation_loss.mean() elif reduction == 'sum': smooth_translation_loss = smooth_translation_loss.sum() return smooth_translation_loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
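A worked example (added; values hand-checked against the eager code above): for a translation tensor of shape (T, ...), the loss sums absolute frame-to-frame differences over the last dimension, prepends zeros for the first frame, scales by 1000 * loss_weight, and then applies the reduction.

import torch

loss_fn = Model()         # reduction='mean', loss_weight=1.0
t = torch.zeros(3, 1, 3)  # three frames of one 3-D translation
t[1] = 1.0                # a unit jump between frame 0 and frame 1
# per-frame sums of |diff| are [0, 3, 3]; mean over 3 frames * 1000 -> 2000
print(loss_fn(t))         # tensor(2000.)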
GatedConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/bm/cbms7k7oqr57xzgjmhazevrue5uae6mh2w7czwc7inftkzj7zfey.py # Topologically Sorted Source Nodes: [temps], Original ATen: [aten.convolution] # Source node to ATen node mapping: # temps => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2592 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 81) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/z3/cz33ttxbb3mrc2hi5wvc77rhp7nbghz2m25q3tb6nqzbjr536aek.py # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.glu] # Source node to ATen node mapping: # outputs => glu # Graph fragment: # %glu : [num_users=1] = call_function[target=torch.ops.aten.glu.default](args = (%convolution, 1), kwargs = {}) triton_poi_fused_glu_1 = async_compile.triton('triton_poi_fused_glu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_glu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 324 x1 = (xindex // 324) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (648*x1)), xmask) tmp1 = tl.load(in_ptr0 + (324 + x0 + (648*x1)), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [temps], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 9, 9), (648, 81, 9, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [temps], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 2592, grid=grid(2592), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) # Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.glu] triton_poi_fused_glu_1.run(buf1, buf2, 1296, grid=grid(1296), stream=stream0) return (buf2, primals_1, primals_3, buf1, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
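For orientation, the two generated kernels above correspond to simple eager-mode steps: triton_poi_fused_convolution_0 adds the convolution bias in place, and triton_poi_fused_glu_1 performs the gated-linear-unit split along the channel dimension. A minimal PyTorch sketch of the GLU arithmetic (the helper name is hypothetical; the offsets 648 = 8*9*9 and 324 = 4*9*9 come from the (4, 8, 9, 9) buffer asserted in call()):

import torch

def glu_reference(temps):
    # temps: (N, 2*C, H, W). The fused kernel loads x0 + 648*x1 for the
    # value half and 324 + x0 + 648*x1 for the gate half of each sample.
    a, b = temps.chunk(2, dim=1)
    return a * torch.sigmoid(b)  # matches tmp0 * tl.sigmoid(tmp1)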
import torch from torch import nn from torch.nn import functional as F class GatedConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1): super(GatedConv2d, self).__init__() self.conv = nn.Conv2d(in_channels, 2 * out_channels, kernel_size, stride, padding, dilation) def forward(self, inputs): temps = self.conv(inputs) outputs = F.glu(temps, dim=1) return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1, 'padding': 4}]
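The 9x9 spatial size asserted for buf0 in the compiled code follows from standard convolution arithmetic applied to the hyperparameters in get_init_inputs(). A quick check (pure Python; the helper name is illustrative):

def conv_out_size(h, k, s, p, d=1):
    # nn.Conv2d output size: floor((H + 2*p - d*(k - 1) - 1) / s) + 1
    return (h + 2 * p - d * (k - 1) - 1) // s + 1

assert conv_out_size(4, k=4, s=1, p=4) == 9  # (4 + 8 - 3 - 1) // 1 + 1
# channels: 2 * out_channels = 8, hence the (4, 8, 9, 9) buffer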
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2592 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 81 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_glu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1296 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 324 x1 = xindex // 324 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 648 * x1), xmask) tmp1 = tl.load(in_ptr0 + (324 + x0 + 648 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 9, 9), (648, 81, 9, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(2592)](buf1, primals_2, 2592, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32) triton_poi_fused_glu_1[grid(1296)](buf1, buf2, 1296, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1 class GatedConv2dNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1): super(GatedConv2dNew, self).__init__() self.conv = nn.Conv2d(in_channels, 2 * out_channels, kernel_size, stride, padding, dilation) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
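One way to sanity-check the compiled wrapper is a parity test against the eager module. This is a sketch, assuming a CUDA device and that GatedConv2d and GatedConv2dNew are both importable; it relies on the two classes sharing the same 'conv.*' parameter names:

import torch

torch.manual_seed(0)
eager = GatedConv2d(4, 4, 4, 1, 4).cuda()
compiled = GatedConv2dNew(4, 4, 4, 1, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical state-dict keys
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), compiled(x), atol=1e-6)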
JamesRitchie/nsf
GatedConv2d
false
5,382
[ "MIT" ]
1
5628a6f8190c9e3840208da8baf5cf403ca9b892
https://github.com/JamesRitchie/nsf/tree/5628a6f8190c9e3840208da8baf5cf403ca9b892
import torch from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1): super().__init__() self.conv = nn.Conv2d(in_channels, 2 * out_channels, kernel_size, stride, padding, dilation) def forward(self, inputs): temps = self.conv(inputs) outputs = F.glu(temps, dim=1) return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1, 'padding': 4}]
MSELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3o/c3ojeovop77jtjsbc2sbf6phxmf3ewz3f7gszih7ehz6obviaiu2.py # Topologically Sorted Source Nodes: [loss, loss_1, loss_2], Original ATen: [aten.mse_loss, aten.mean, aten.mul] # Source node to ATen node mapping: # loss => pow_1, sub # loss_1 => mean # loss_2 => mul # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_mean_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mean_mse_loss_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss, loss_1, loss_2], Original ATen: [aten.mse_loss, aten.mean, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_mean_mse_loss_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
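The kernel above is a persistent reduction: with XBLOCK = 1 and RBLOCK = 256, a single program covers all 4*4*4*4 = 256 input elements, and the hard-coded divisor 256.0 is that element count specialized for this input shape (the trailing * 1.0 is the default loss_weight). In eager terms the whole graph reduces to (a sketch):

import torch

pred, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = ((pred - target) ** 2).sum() / 256.0 * 1.0  # mean MSE times loss_weight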
import functools import torch import torch.nn as nn import torch.cuda.comm from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def mse_loss(pred, target): """Wrapper of mse loss.""" return F.mse_loss(pred, target, reduction='none') class MSELoss(nn.Module): """MSELoss. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function of loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): Weight of the loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self.
reduction) loss = self.loss_weight * mse_loss(pred, target, weight, reduction= reduction, avg_factor=avg_factor) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
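The avg_factor branch in weight_reduce_loss replaces the mean's denominator instead of using numel(), which matters when only a subset of elements carries weight. A worked example mirroring the decorator's doctest values:

import torch

loss = torch.tensor([1.0, 1.0, 2.0])    # element-wise |pred - target|
weight = torch.tensor([1.0, 0.0, 1.0])
# reduction='mean' with avg_factor=2: sum(loss * weight) / avg_factor
print((loss * weight).sum() / 2)         # tensor(1.5000), as in the doctest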
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import functools import torch.nn as nn import torch.cuda.comm from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = 1.0 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mse_loss_mul_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.)
>>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def mse_loss(pred, target): """Wrapper of mse loss.""" return F.mse_loss(pred, target, reduction='none') class MSELossNew(nn.Module): """MSELoss. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
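Note that MSELossNew.forward above only exercises the default path (reduction='mean', no weight, loss_weight=1.0); any other combination would go through the eager reduce_loss, which dispatches on F._Reduction.get_enum. The enum mapping matches the branches in reduce_loss and is easy to confirm:

from torch.nn import functional as F

assert F._Reduction.get_enum('none') == 0  # keep element-wise loss
assert F._Reduction.get_enum('mean') == 1  # loss.mean()
assert F._Reduction.get_enum('sum') == 2   # loss.sum()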
JasonBoy1/mmhuman3d
MSELoss
false
5,383
[ "Apache-2.0" ]
1
79b2665191115f3ed905e6afdf09990a8d484362
https://github.com/JasonBoy1/mmhuman3d/tree/79b2665191115f3ed905e6afdf09990a8d484362
import functools import torch import torch.nn as nn import torch.cuda.comm from torch.nn import functional as F def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def weighted_loss(loss_func): """Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000) """ @functools.wraps(loss_func) def wrapper(pred, target, weight=None, reduction='mean', avg_factor= None, **kwargs): loss = loss_func(pred, target, **kwargs) loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss return wrapper @weighted_loss def mse_loss(pred, target): """Wrapper of mse loss.""" return F.mse_loss(pred, target, reduction='none') class Model(nn.Module): """MSELoss. Args: reduction (str, optional): The method that reduces the loss to a scalar. Options are "none", "mean" and "sum". loss_weight (float, optional): The weight of the loss. Defaults to 1.0 """ def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() assert reduction in (None, 'none', 'mean', 'sum') self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function of loss. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): Weight of the loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Defaults to None. Returns: torch.Tensor: The c # ... truncated (>4000 chars) for memory efficiency
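A parity check between the fused Triton path and eager F.mse_loss, assuming a CUDA device and that MSELossNew from the compiled block above is importable (the harness itself is a sketch):

import torch
from torch.nn import functional as F

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(F.mse_loss(a, b), MSELossNew()(a, b))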
GCNClassification
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3w/c3w6ed26vbtzgzigkkyuiuwphm777x7aoogcxkvoly3ia7qifnzg.py # Topologically Sorted Source Nodes: [embeddings_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # embeddings_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/fw/cfweytfxnso6abkha6rk5gwco2kbvibvw4vuddh56meprt4wzlj6.py # Topologically Sorted Source Nodes: [combined_rep], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined_rep => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %relu_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nc/cncd5yvgjpjfrvyaveva55remy3ckx5quvuhdmurdt3pp6k3qtux.py # Topologically Sorted Source Nodes: [hidden_rep], Original ATen: [aten.relu] # Source node to ATen node mapping: # hidden_rep => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bh/cbhtmvezuerduixfmmlufdezlxhnkrnst5yyfg6jm4v6w6pkgfrl.py # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.mean] # Source node to ATen node mapping: # logits => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%addmm_2, [0]), kwargs = {}) triton_poi_fused_mean_3 = async_compile.triton('triton_poi_fused_mean_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_3(in_ptr0, out_ptr0, xnumel, 
XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cs/ccserabwgg7d2qqq373bkl66yqoopjygng3rto5goox6mh6ekjmx.py # Topologically Sorted Source Nodes: [embeddings_4], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # embeddings_4 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_3,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) 
assert_size_stride(primals_10, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm] extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [embeddings], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [embeddings_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf2, 16, grid=grid(16), stream=stream0) buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [embeddings_3], Original ATen: [aten.mm] extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [combined_rep], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_2, buf4, buf5, 32, grid=grid(32), stream=stream0) buf6 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [hidden_rep], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf7, primals_6, 16, grid=grid(16), stream=stream0) del primals_6 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [graph_rep], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf9, primals_8, 16, grid=grid(16), stream=stream0) del primals_8 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, buf9, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_10 buf11 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.mean] triton_poi_fused_mean_3.run(buf10, buf11, 4, grid=grid(4), stream=stream0) del buf10 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [embeddings_4], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_4.run(buf4, buf12, 16, grid=grid(16), stream=stream0) del buf4 return (buf11, buf2, buf5, buf7, buf9, primals_9, primals_7, primals_5, buf12, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_6 = 
rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
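Of the kernels above, triton_poi_fused_cat_1 is the least obvious: it fuses the channel-wise concatenation with the second ReLU, loading node features for output columns x0 < 4 and clamping the raw embedding buffer (buf4, produced before any ReLU) for columns x0 >= 4. An eager sketch of the same fusion (helper name hypothetical):

import torch

def fused_cat_reference(feats, pre_relu_emb):
    # feats: (N, 4); pre_relu_emb: (N, 4) second-layer output before ReLU
    return torch.cat((feats, pre_relu_emb.clamp_min(0)), dim=1)  # (N, 8)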
import torch import torch.nn as nn import torch.utils.data class Readout(nn.Module): """ This module learns a single graph level representation for a molecule given GraphSAGE generated embeddings """ def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim, num_cats): super(Readout, self).__init__() self.attr_dim = attr_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.num_cats = num_cats self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) self.output = nn.Linear(output_dim, num_cats) self.act = nn.ReLU() def forward(self, node_features, node_embeddings): combined_rep = torch.cat((node_features, node_embeddings), dim=1) hidden_rep = self.act(self.layer1(combined_rep)) graph_rep = self.act(self.layer2(hidden_rep)) logits = torch.mean(self.output(graph_rep), dim=0) return logits class GCN(nn.Module): """ Graph Convolutional Network based on https://arxiv.org/abs/1609.02907 """ def __init__(self, feat_dim, hidden_dim1, hidden_dim2, dropout, is_sparse=False): """Dense version of GCN.""" super(GCN, self).__init__() self.W1 = nn.Parameter(torch.FloatTensor(feat_dim, hidden_dim1)) self.W2 = nn.Parameter(torch.FloatTensor(hidden_dim1, hidden_dim2)) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropout) nn.init.xavier_uniform_(self.W1.data) nn.init.xavier_uniform_(self.W2.data) self.is_sparse = is_sparse def forward(self, x, adj): support = torch.mm(x, self.W1) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) embeddings = self.dropout(embeddings) support = torch.mm(embeddings, self.W2) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) return embeddings class GCNClassification(nn.Module): """ Network that consolidates GCN + Readout into a single nn.Module """ def __init__(self, feat_dim, hidden_dim, node_embedding_dim, dropout, readout_hidden_dim, graph_embedding_dim, num_categories, sparse_adj =False): super(GCNClassification, self).__init__() self.gcn = GCN(feat_dim, hidden_dim, node_embedding_dim, dropout, is_sparse=sparse_adj) self.readout = Readout(feat_dim, node_embedding_dim, readout_hidden_dim, graph_embedding_dim, num_categories) def forward(self, adj_matrix, feature_matrix): node_embeddings = self.gcn(feature_matrix, adj_matrix) logits = self.readout(feature_matrix, node_embeddings) return logits def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'feat_dim': 4, 'hidden_dim': 4, 'node_embedding_dim': 4, 'dropout': 0.5, 'readout_hidden_dim': 4, 'graph_embedding_dim': 4, 'num_categories': 4}]
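Each GCN layer above applies the Kipf & Welling propagation rule H' = sigma(A H W): a feature transform (the 'support' matmul) followed by neighborhood aggregation through the adjacency matrix. A one-layer sketch with a dense adjacency (names are illustrative):

import torch

def gcn_layer(adj, h, w):
    support = h @ w                   # feature transform, torch.mm(x, W)
    return torch.relu(adj @ support)  # aggregate neighbors, then activate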
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.full([1], 0, tl.int32) tmp11 = triton_helpers.maximum(tmp10, tmp9) tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype) tmp13 = tl.where(tmp6, tmp11, tmp12) tmp14 = tl.where(tmp4, tmp5, tmp13) tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_mean_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 8), (8, 1)) assert_size_stride(primals_6, (4,), (1,)) 
assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_relu_0[grid(16)](buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_4, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf3, out=buf4) buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](primals_2, buf4, buf5, 32, XBLOCK= 32, num_warps=1, num_stages=1) buf6 = buf3 del buf3 extern_kernels.mm(buf5, reinterpret_tensor(primals_5, (8, 4), (1, 8 ), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_2[grid(16)](buf7, primals_6, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_7, (4, 4), (1, 4 ), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_2[grid(16)](buf9, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, buf9, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10) del primals_10 buf11 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_3[grid(4)](buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf10 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(16)](buf4, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf4 return (buf11, buf2, buf5, buf7, buf9, primals_9, primals_7, primals_5, buf12, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)) class Readout(nn.Module): """ This module learns a single graph level representation for a molecule given GraphSAGE generated embeddings """ def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim, num_cats): super(Readout, self).__init__() self.attr_dim = attr_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.num_cats = num_cats self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) self.output = nn.Linear(output_dim, num_cats) self.act = nn.ReLU() def forward(self, node_features, node_embeddings): combined_rep = torch.cat((node_features, node_embeddings), dim=1) hidden_rep = self.act(self.layer1(combined_rep)) graph_rep = self.act(self.layer2(hidden_rep)) logits = torch.mean(self.output(graph_rep), dim=0) return logits class GCN(nn.Module): """ Graph Convolutional Network based on https://arxiv.org/abs/1609.02907 """ def __init__(self, feat_dim, hidden_dim1, hidden_dim2, dropout, is_sparse=False): """Dense version of GCN.""" super(GCN, self).__init__() self.W1 = nn.Parameter(torch.FloatTensor(feat_dim, hidden_dim1)) self.W2 = nn.Parameter(torch.FloatTensor(hidden_dim1, hidden_dim2)) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropout) nn.init.xavier_uniform_(self.W1.data) nn.init.xavier_uniform_(self.W2.data) self.is_sparse = is_sparse def forward(self, x, adj): support = torch.mm(x, self.W1)
embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) embeddings = self.dropout(embeddings) support = torch.mm(embeddings, self.W2) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) return embeddings class GCNClassificationNew(nn.Module): """ Network that consolidates GCN + Readout into a single nn.Module """ def __init__(self, feat_dim, hidden_dim, node_embedding_dim, dropout, readout_hidden_dim, graph_embedding_dim, num_categories, sparse_adj =False): super(GCNClassificationNew, self).__init__() self.gcn = GCN(feat_dim, hidden_dim, node_embedding_dim, dropout, is_sparse=sparse_adj) self.readout = Readout(feat_dim, node_embedding_dim, readout_hidden_dim, graph_embedding_dim, num_categories) def forward(self, input_0, input_1): primals_1 = self.gcn.W1 primals_2 = self.gcn.W2 primals_5 = self.readout.layer1.weight primals_6 = self.readout.layer1.bias primals_3 = self.readout.layer2.weight primals_8 = self.readout.layer2.bias primals_4 = self.readout.output.weight primals_10 = self.readout.output.bias primals_7 = input_0 primals_9 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
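A usage sketch for the compiled wrapper, assuming a CUDA device; shapes follow get_inputs()/get_init_inputs() from the eager record. call() returns buf11 (the logits) first, followed by activations and the ReLU mask buf12 that the generated backward pass consumes, which is why forward surfaces only output[0]:

import torch

model = GCNClassificationNew(feat_dim=4, hidden_dim=4, node_embedding_dim=4,
    dropout=0.5, readout_hidden_dim=4, graph_embedding_dim=4,
    num_categories=4).cuda()
adj = torch.rand(4, 4, device='cuda')
feats = torch.rand(4, 4, device='cuda')
logits = model(adj, feats)  # shape (4,), mean-pooled class scores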
JW9MsjwjnpdRLFw/TSFL
GCNClassification
false
5,384
[ "MIT" ]
1
ccca391348fde270c9d43149a3397ac3cad4c6e0
https://github.com/JW9MsjwjnpdRLFw/TSFL/tree/ccca391348fde270c9d43149a3397ac3cad4c6e0
import torch import torch.nn as nn import torch.utils.data class Readout(nn.Module): """ This module learns a single graph level representation for a molecule given GraphSAGE generated embeddings """ def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim, num_cats): super().__init__() self.attr_dim = attr_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.num_cats = num_cats self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim) self.layer2 = nn.Linear(hidden_dim, output_dim) self.output = nn.Linear(output_dim, num_cats) self.act = nn.ReLU() def forward(self, node_features, node_embeddings): combined_rep = torch.cat((node_features, node_embeddings), dim=1) hidden_rep = self.act(self.layer1(combined_rep)) graph_rep = self.act(self.layer2(hidden_rep)) logits = torch.mean(self.output(graph_rep), dim=0) return logits class GCN(nn.Module): """ Graph Convolutional Network based on https://arxiv.org/abs/1609.02907 """ def __init__(self, feat_dim, hidden_dim1, hidden_dim2, dropout, is_sparse=False): """Dense version of GCN.""" super().__init__() self.W1 = nn.Parameter(torch.FloatTensor(feat_dim, hidden_dim1)) self.W2 = nn.Parameter(torch.FloatTensor(hidden_dim1, hidden_dim2)) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=dropout) nn.init.xavier_uniform_(self.W1.data) nn.init.xavier_uniform_(self.W2.data) self.is_sparse = is_sparse def forward(self, x, adj): support = torch.mm(x, self.W1) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) embeddings = self.dropout(embeddings) support = torch.mm(embeddings, self.W2) embeddings = torch.sparse.mm(adj, support ) if self.is_sparse else torch.mm(adj, support) embeddings = self.relu(embeddings) return embeddings class Model(nn.Module): """ Network that consolidates GCN + Readout into a single nn.Module """ def __init__(self, feat_dim, hidden_dim, node_embedding_dim, dropout, readout_hidden_dim, graph_embedding_dim, num_categories, sparse_adj =False): super().__init__() self.gcn = GCN(feat_dim, hidden_dim, node_embedding_dim, dropout, is_sparse=sparse_adj) self.readout = Readout(feat_dim, node_embedding_dim, readout_hidden_dim, graph_embedding_dim, num_categories) def forward(self, adj_matrix, feature_matrix): node_embeddings = self.gcn(feature_matrix, adj_matrix) logits = self.readout(feature_matrix, node_embeddings) return logits def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'feat_dim': 4, 'hidden_dim': 4, 'node_embedding_dim': 4, 'dropout': 0.5, 'readout_hidden_dim': 4, 'graph_embedding_dim': 4, 'num_categories': 4}]
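Every record in this collection follows the same driver convention: get_init_inputs() yields positional and keyword constructor arguments, and get_inputs() yields the forward tensors. A generic runner (a hypothetical helper) therefore works for this Model and the others alike:

import torch

def run_record(model_cls, init_fn, inputs_fn):
    args, kwargs = init_fn()            # e.g. [[], {'feat_dim': 4, ...}]
    model = model_cls(*args, **kwargs)
    return model(*inputs_fn())

logits = run_record(Model, get_init_inputs, get_inputs)  # shape (4,)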
RBF_Kernel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qs/cqsdkzximvk3btioxxrtesr5guv7hozfpzpdrwrr36vwtvxl3s3k.py # Topologically Sorted Source Nodes: [mul, add, dnorm2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # dnorm2 => add_1 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_1, -2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (5*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last') tmp1 = -2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [XY], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [XX], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf1) del arg0_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [YY], Original ATen: [aten.mm] extern_kernels.mm(arg1_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf2) del arg1_1 buf3 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, add, dnorm2], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(buf3, buf1, buf2, 16, grid=grid(16), stream=stream0) del buf1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np def norm_sq(X, Y): XX = X.matmul(X.t()) XY = X.matmul(Y.t()) YY = Y.matmul(Y.t()) return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0) class RBF_Kernel(torch.nn.Module): """ RBF kernel :math:`K(x, y) = exp(||x-v||^2 / (2h)) """ def __init__(self, bandwidth=None): super().__init__() self.bandwidth = bandwidth def _bandwidth(self, norm_sq): if self.bandwidth is None: np_dnorm2 = norm_sq.detach().cpu().numpy() h = np.median(np_dnorm2) / (2 * np.log(np_dnorm2.shape[0] + 1)) return np.sqrt(h).item() else: return self.bandwidth def forward(self, X, Y): dnorm2 = norm_sq(X, Y) bandwidth = self._bandwidth(dnorm2) gamma = 1.0 / (1e-08 + 2 * bandwidth ** 2) K_XY = (-gamma * dnorm2).exp() return K_XY def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 5 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp1 = -2.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf1) del arg0_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg1_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf2) del arg1_1 buf3 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_0[grid(16)](buf3, buf1, buf2, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf1 del buf2 return buf3, def norm_sq(X, Y): XX = X.matmul(X.t()) XY = X.matmul(Y.t()) YY = Y.matmul(Y.t()) return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0) class RBF_KernelNew(torch.nn.Module): """ RBF kernel :math:`K(x, y) = exp(||x-v||^2 / (2h)) """ def __init__(self, bandwidth=None): super().__init__() self.bandwidth = bandwidth def _bandwidth(self, norm_sq): if self.bandwidth is None: np_dnorm2 = norm_sq.detach().cpu().numpy() h = np.median(np_dnorm2) / (2 * np.log(np_dnorm2.shape[0] + 1)) return np.sqrt(h).item() else: return self.bandwidth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
JeremyAlain/meta_learning_pacoh
RBF_Kernel
false
5,385
[ "MIT" ]
1
b4c2c37d9715e74542bab556ac1f5d778cc3409c
https://github.com/JeremyAlain/meta_learning_pacoh/tree/b4c2c37d9715e74542bab556ac1f5d778cc3409c
import torch import numpy as np def norm_sq(X, Y): XX = X.matmul(X.t()) XY = X.matmul(Y.t()) YY = Y.matmul(Y.t()) return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0) class Model(torch.nn.Module): """ RBF kernel :math:`K(x, y) = exp(||x-v||^2 / (2h)) """ def __init__(self, bandwidth=None): super().__init__() self.bandwidth = bandwidth def _bandwidth(self, norm_sq): if self.bandwidth is None: np_dnorm2 = norm_sq.detach().cpu().numpy() h = np.median(np_dnorm2) / (2 * np.log(np_dnorm2.shape[0] + 1)) return np.sqrt(h).item() else: return self.bandwidth def forward(self, X, Y): dnorm2 = norm_sq(X, Y) bandwidth = self._bandwidth(dnorm2) gamma = 1.0 / (1e-08 + 2 * bandwidth ** 2) K_XY = (-gamma * dnorm2).exp() return K_XY def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return []
CustomNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jq/cjqaq2meov3vkcgfealq7w4w35tw2oemvmhneuxmigeoumva22p7.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # x_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) 
''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class CustomNet(nn.Module): """ A network with a fully connected layer followed by a sigmoid layer. This is used for testing customized operation handles. """ def __init__(self, input_dim: 'int', output_dim: 'int') ->None: super(CustomNet, self).__init__() self.conv = nn.Linear(input_dim, output_dim) self.sigmoid = nn.Sigmoid() def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv(x) x = self.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1 class CustomNetNew(nn.Module): """ A network with a fully connected layer followed by a sigmoid layer. This is used for testing customized operation handles. """ def __init__(self, input_dim: 'int', output_dim: 'int') ->None: super(CustomNetNew, self).__init__() self.conv = nn.Linear(input_dim, output_dim) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Jennifer-Rigdon/fvcore
CustomNet
false
5,386
[ "Apache-2.0" ]
1
7e800a86f2df93da017e07380543b4060ab88c94
https://github.com/Jennifer-Rigdon/fvcore/tree/7e800a86f2df93da017e07380543b4060ab88c94
import torch import torch.nn as nn class Model(nn.Module): """ A network with a fully connected layer followed by a sigmoid layer. This is used for testing customized operation handles. """ def __init__(self, input_dim: 'int', output_dim: 'int') ->None: super().__init__() self.conv = nn.Linear(input_dim, output_dim) self.sigmoid = nn.Sigmoid() def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv(x) x = self.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
Decoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qb/cqb3vtiomads7o6ymnn3sjvoftqw2fxs7kcu255cksqkcauhxdnm.py # Topologically Sorted Source Nodes: [est_source], Original ATen: [aten.clone] # Source node to ATen node mapping: # est_source => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4*y0)), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (y0 + (4*x2) + (16*y1)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ya/cyansxctlvhajqdzgxv2kjzkrfue2yk5b7zcghioinbkozfplw4p.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unfold,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = x0 + x1 tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rq/crqzm22z7mq3o7bwfiiomiykhemwodzsakf2v6h7bjpdqhfbuurg.py # Topologically Sorted Source Nodes: [result], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # result => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 5, 2], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_2 = async_compile.triton('triton_poi_fused_new_zeros_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 
1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bv/cbvi5v4haqxjalbdx6io67jiarq7hydzbfzpcz27hjo3ppc7xq3x.py # Topologically Sorted Source Nodes: [result, index_add_], Original ATen: [aten.new_zeros, aten.index_add] # Source node to ATen node mapping: # index_add_ => index_put # result => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 5, 2], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%full_default, [None, None, None, %view_4], %view_3, True), kwargs = {}) triton_poi_fused_index_add_new_zeros_3 = async_compile.triton('triton_poi_fused_index_add_new_zeros_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_add_new_zeros_3', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_add_new_zeros_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 8 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex % 16 x0 = xindex % 2 x5 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + ((4*x2) + (16*(x4 // 4)) + (64*x3) + (64*((x4 + (16*x2)) // 64)) + (x4 % 4)), xmask) tmp1 = tl.full([XBLOCK], 5, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 5)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 5") tl.atomic_add(out_ptr0 + (x0 + (2*tmp4) + (10*x5)), tmp6, xmask, sem='relaxed') ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [est_source], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, primals_2, buf0, 16, 4, grid=grid(16, 4), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [est_source], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 2), (2, 1), torch.int64) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf2, 8, grid=grid(8), stream=stream0) buf3 = empty_strided_cuda((4, 1, 4, 5, 2), (40, 40, 10, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [result], Original ATen: [aten.new_zeros] triton_poi_fused_new_zeros_2.run(buf3, 160, grid=grid(160), stream=stream0) # Topologically Sorted Source Nodes: [result, index_add_], Original ATen: [aten.new_zeros, aten.index_add] triton_poi_fused_index_add_new_zeros_3.run(buf2, buf1, buf3, 256, grid=grid(256), stream=stream0) del buf1 return (reinterpret_tensor(buf3, (4, 1, 4, 10), (40, 40, 10, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (8, ), (1, ), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn import torch.hub def overlap_and_add(signal, frame_step): outer_dimensions = signal.size()[:-2] frames, frame_length = signal.size()[-2:] subframe_length = math.gcd(frame_length, frame_step) subframe_step = frame_step // subframe_length subframes_per_frame = frame_length // subframe_length output_size = frame_step * (frames - 1) + frame_length output_subframes = output_size // subframe_length subframe_signal = signal.view(*outer_dimensions, -1, subframe_length) frame = torch.arange(0, output_subframes, device=signal.device).unfold( 0, subframes_per_frame, subframe_step) frame = frame.long() frame = frame.contiguous().view(-1) result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length) result.index_add_(-2, frame, subframe_signal) result = result.view(*outer_dimensions, -1) return result class Decoder(nn.Module): def __init__(self, N, L, audio_channels): super(Decoder, self).__init__() self.N, self.L = N, L self.audio_channels = audio_channels self.basis_signals = nn.Linear(N, audio_channels * L, bias=False) def forward(self, mixture_w, est_mask): """ Args: mixture_w: [M, N, K] est_mask: [M, C, N, K] Returns: est_source: [M, C, T] """ source_w = torch.unsqueeze(mixture_w, 1) * est_mask source_w = torch.transpose(source_w, 2, 3) est_source = self.basis_signals(source_w) m, c, k, _ = est_source.size() est_source = est_source.view(m, c, k, self.audio_channels, -1 ).transpose(2, 3).contiguous() est_source = overlap_and_add(est_source, self.L // 2) return est_source def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'N': 4, 'L': 4, 'audio_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math from torch import nn import torch.hub assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = x0 + x1 tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 160 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_index_add_new_zeros_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 8 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex % 16 x0 = xindex % 2 x5 = xindex // 16 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (4 * x2 + 16 * (x4 // 4) + 64 * x3 + 64 * ((x4 + 16 * x2) // 64) + x4 % 4), xmask) tmp1 = tl.full([XBLOCK], 5, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 5) | ~xmask, 'index out of bounds: 0 <= tmp4 < 5') tl.atomic_add(out_ptr0 + (x0 + 2 * tmp4 + 10 * x5), tmp6, xmask, sem= 'relaxed') def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 1, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_1, primals_2, buf0, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 2), (2, 1), torch.int64) triton_poi_fused_clone_1[grid(8)](buf2, 8, XBLOCK=8, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 1, 4, 5, 2), (40, 40, 10, 2, 1), torch.float32) triton_poi_fused_new_zeros_2[grid(160)](buf3, 160, XBLOCK=256, num_warps=4, num_stages=1) 
triton_poi_fused_index_add_new_zeros_3[grid(256)](buf2, buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return reinterpret_tensor(buf3, (4, 1, 4, 10), (40, 40, 10, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor( buf2, (8,), (1,), 0) def overlap_and_add(signal, frame_step): outer_dimensions = signal.size()[:-2] frames, frame_length = signal.size()[-2:] subframe_length = math.gcd(frame_length, frame_step) subframe_step = frame_step // subframe_length subframes_per_frame = frame_length // subframe_length output_size = frame_step * (frames - 1) + frame_length output_subframes = output_size // subframe_length subframe_signal = signal.view(*outer_dimensions, -1, subframe_length) frame = torch.arange(0, output_subframes, device=signal.device).unfold( 0, subframes_per_frame, subframe_step) frame = frame.long() frame = frame.contiguous().view(-1) result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length) result.index_add_(-2, frame, subframe_signal) result = result.view(*outer_dimensions, -1) return result class DecoderNew(nn.Module): def __init__(self, N, L, audio_channels): super(DecoderNew, self).__init__() self.N, self.L = N, L self.audio_channels = audio_channels self.basis_signals = nn.Linear(N, audio_channels * L, bias=False) def forward(self, input_0, input_1): primals_3 = self.basis_signals.weight primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
JavierCane/demucs
Decoder
false
5,387
[ "MIT" ]
1
01d14844a71be7b5d86adf06a8501a951157c3fe
https://github.com/JavierCane/demucs/tree/01d14844a71be7b5d86adf06a8501a951157c3fe
import math import torch from torch import nn import torch.hub def overlap_and_add(signal, frame_step): outer_dimensions = signal.size()[:-2] frames, frame_length = signal.size()[-2:] subframe_length = math.gcd(frame_length, frame_step) subframe_step = frame_step // subframe_length subframes_per_frame = frame_length // subframe_length output_size = frame_step * (frames - 1) + frame_length output_subframes = output_size // subframe_length subframe_signal = signal.view(*outer_dimensions, -1, subframe_length) frame = torch.arange(0, output_subframes, device=signal.device).unfold( 0, subframes_per_frame, subframe_step) frame = frame.long() frame = frame.contiguous().view(-1) result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length) result.index_add_(-2, frame, subframe_signal) result = result.view(*outer_dimensions, -1) return result class Model(nn.Module): def __init__(self, N, L, audio_channels): super().__init__() self.N, self.L = N, L self.audio_channels = audio_channels self.basis_signals = nn.Linear(N, audio_channels * L, bias=False) def forward(self, mixture_w, est_mask): """ Args: mixture_w: [M, N, K] est_mask: [M, C, N, K] Returns: est_source: [M, C, T] """ source_w = torch.unsqueeze(mixture_w, 1) * est_mask source_w = torch.transpose(source_w, 2, 3) est_source = self.basis_signals(source_w) m, c, k, _ = est_source.size() est_source = est_source.view(m, c, k, self.audio_channels, -1 ).transpose(2, 3).contiguous() est_source = overlap_and_add(est_source, self.L // 2) return est_source def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4, 4]
IMQSteinKernel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/lt/clt4cux4p3nctqnicttahtpztlsfonwjafpks36y7frjsc3mtjob.py # Topologically Sorted Source Nodes: [sub, norm_sq], Original ATen: [aten.sub, aten.pow] # Source node to ATen node mapping: # norm_sq => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) triton_poi_fused_pow_sub_0 = async_compile.triton('triton_poi_fused_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex 
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + (x4), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, norm_sq], Original ATen: [aten.sub, aten.pow] stream0 = get_raw_stream(0) triton_poi_fused_pow_sub_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch def norm_sq(X, Y): XX = X.matmul(X.t()) XY = X.matmul(Y.t()) YY = Y.matmul(Y.t()) return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0) class IMQSteinKernel(torch.nn.Module): """ IMQ (inverse multi-quadratic) kernel :math:`K(x, y) = (\\alpha + ||x-y||^2/h)^{\\beta}` """ def __init__(self, alpha=0.5, beta=-0.5, bandwidth=None): super(IMQSteinKernel, self).__init__() assert alpha > 0.0, 'alpha must be positive.' assert beta < 0.0, 'beta must be negative.' self.alpha = alpha self.beta = beta self.bandwidth = bandwidth def _bandwidth(self, norm_sq): """ Compute the bandwidth along each dimension using the median pairwise squared distance between particles. """ if self.bandwidth is None: num_particles = norm_sq.size(0) index = torch.arange(num_particles) norm_sq = norm_sq[index > index.unsqueeze(-1), ...] median = norm_sq.median(dim=0)[0] assert median.shape == norm_sq.shape[-1:] return median / math.log(num_particles + 1) else: return self.bandwidth def forward(self, X, Y): norm_sq = (X.unsqueeze(0) - Y.unsqueeze(1)) ** 2 assert norm_sq.dim() == 3 bandwidth = self._bandwidth(norm_sq) base_term = self.alpha + torch.sum(norm_sq / bandwidth, dim=-1) log_kernel = self.beta * torch.log(base_term) return log_kernel.exp() def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 16 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + x4, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_pow_sub_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, def norm_sq(X, Y): XX = X.matmul(X.t()) XY = X.matmul(Y.t()) YY = Y.matmul(Y.t()) return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0) class IMQSteinKernelNew(torch.nn.Module): """ IMQ (inverse multi-quadratic) kernel :math:`K(x, y) = (\\alpha + ||x-y||^2/h)^{\\beta}` """ def __init__(self, alpha=0.5, beta=-0.5, bandwidth=None): super(IMQSteinKernelNew, self).__init__() assert alpha > 0.0, 'alpha must be positive.' assert beta < 0.0, 'beta must be negative.' self.alpha = alpha self.beta = beta self.bandwidth = bandwidth def _bandwidth(self, norm_sq): """ Compute the bandwidth along each dimension using the median pairwise squared distance between particles. """ if self.bandwidth is None: num_particles = norm_sq.size(0) index = torch.arange(num_particles) norm_sq = norm_sq[index > index.unsqueeze(-1), ...] median = norm_sq.median(dim=0)[0] assert median.shape == norm_sq.shape[-1:] return median / math.log(num_particles + 1) else: return self.bandwidth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
JeremyAlain/meta_learning_pacoh
IMQSteinKernel
false
5,388
[ "MIT" ]
1
b4c2c37d9715e74542bab556ac1f5d778cc3409c
https://github.com/JeremyAlain/meta_learning_pacoh/tree/b4c2c37d9715e74542bab556ac1f5d778cc3409c
import math import torch def norm_sq(X, Y): XX = X.matmul(X.t()) XY = X.matmul(Y.t()) YY = Y.matmul(Y.t()) return -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0) class Model(torch.nn.Module): """ IMQ (inverse multi-quadratic) kernel :math:`K(x, y) = (\\alpha + ||x-y||^2/h)^{\\beta}` """ def __init__(self, alpha=0.5, beta=-0.5, bandwidth=None): super().__init__() assert alpha > 0.0, 'alpha must be positive.' assert beta < 0.0, 'beta must be negative.' self.alpha = alpha self.beta = beta self.bandwidth = bandwidth def _bandwidth(self, norm_sq): """ Compute the bandwidth along each dimension using the median pairwise squared distance between particles. """ if self.bandwidth is None: num_particles = norm_sq.size(0) index = torch.arange(num_particles) norm_sq = norm_sq[index > index.unsqueeze(-1), ...] median = norm_sq.median(dim=0)[0] assert median.shape == norm_sq.shape[-1:] return median / math.log(num_particles + 1) else: return self.bandwidth def forward(self, X, Y): norm_sq = (X.unsqueeze(0) - Y.unsqueeze(1)) ** 2 assert norm_sq.dim() == 3 bandwidth = self._bandwidth(norm_sq) base_term = self.alpha + torch.sum(norm_sq / bandwidth, dim=-1) log_kernel = self.beta * torch.log(base_term) return log_kernel.exp() def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return []
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex 
x0 = xindex % 400 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/op/coptu6xep3awc4lajb4xivopppqmjtx3zy7ebtazm45rqvyeknds.py # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # out_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 300 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/as/casrc7bf7ghsendgi7tkqxk3hj4ic6aqb4rmkxzuk5dhbidznia7.py # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # out_3 => relu_1 # out_4 => view_4 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %view_4 : [num_users=2] = 
call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 300]), kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 19200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 300 x1 = (xindex // 300) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (300*(x1 % 4)) + (1216*(x1 // 4))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sb/csbbn2sf7v6eutogmmhksp5x7rwd7z3vppgfx2mbu7v3njic7bqh.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.tanh] # Source node to ATen node mapping: # out_5 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (400, 4), (4, 1)) assert_size_stride(primals_2, (400, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (300, 400), (400, 1)) assert_size_stride(primals_5, (300, ), (1, )) assert_size_stride(primals_6, (4, 300), (300, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 25600, grid=grid(25600), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1), torch.bool) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf7, 19200, grid=grid(19200), stream=stream0) del primals_5 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf3, buf4, 19200, grid=grid(19200), stream=stream0) del buf3 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1, 300), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.tanh] triton_poi_fused_tanh_3.run(buf6, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, buf6, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((300, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((300, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 300), (300, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn


def fanin_init(size, fanin=None):
    fanin = fanin or size[0]
    v = 1.0 / np.sqrt(fanin)
    return torch.Tensor(size).uniform_(-v, v)


class Actor(nn.Module):

    def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
            init_w=0.003):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(nb_states, hidden1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc3 = nn.Linear(hidden2, nb_actions)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.init_weights(init_w)

    def init_weights(self, init_w):
        self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())
        self.fc2.weight.data = fanin_init(self.fc2.weight.data.size())
        self.fc3.weight.data.uniform_(-init_w, init_w)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        out = self.tanh(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nb_states': 4, 'nb_actions': 4}]
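Taken together, this is the standard DDPG actor: fanin_init draws each hidden layer's weights uniformly from [-1/sqrt(fan_in), 1/sqrt(fan_in)], and the tanh head bounds actions to [-1, 1]. A minimal eager smoke test (my sketch, not part of the dataset entry; it assumes the Actor and get_inputs definitions above are in scope):

import torch

actor = Actor(nb_states=4, nb_actions=4)
x = get_inputs()[0]                # (4, 4, 4, 4); nn.Linear acts on the last dim
y = actor(x)
assert y.shape == x.shape          # last dim maps 4 -> 400 -> 300 -> 4
assert torch.all(y.abs() <= 1.0)   # tanh output is bounded to [-1, 1]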
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 25600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 400
    x2 = xindex % 1600
    x3 = xindex // 1600
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x4, tmp4, xmask)
    tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 19200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 300
    x2 = xindex // 1200
    x3 = xindex % 1200
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask)
    tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 19200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 300
    x1 = xindex // 300
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 300 * (x1 % 4) + 1216 * (x1 // 4)), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (400, 4), (4, 1))
    assert_size_stride(primals_2, (400,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (300, 400), (400, 1))
    assert_size_stride(primals_5, (300,), (1,))
    assert_size_stride(primals_6, (4, 300), (300, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0)
        del buf0
        buf8 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
            primals_2, buf8, 25600, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 300), (300, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
            reinterpret_tensor(primals_4, (400, 300), (1, 400), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4, 300), (4864, 1216, 300, 1),
            torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4, 300), (5120, 1280, 300, 1),
            torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(19200)](buf2,
            primals_5, buf3, buf7, 19200, XBLOCK=256, num_warps=4,
            num_stages=1)
        del primals_5
        buf4 = buf2
        del buf2
        triton_poi_fused_relu_view_2[grid(19200)](buf3, buf4, 19200,
            XBLOCK=128, num_warps=4, num_stages=1)
        del buf3
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (300, 4), (1,
            300), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused_tanh_3[grid(256)](buf6, primals_7, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_7
    return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 400), (400, 1), 0), buf4, buf6,
        primals_6, buf7, primals_4, buf8)


def fanin_init(size, fanin=None):
    fanin = fanin or size[0]
    v = 1.0 / np.sqrt(fanin)
    return torch.Tensor(size).uniform_(-v, v)


class ActorNew(nn.Module):

    def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
            init_w=0.003):
        super(ActorNew, self).__init__()
        self.fc1 = nn.Linear(nb_states, hidden1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc3 = nn.Linear(hidden2, nb_actions)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.init_weights(init_w)

    def init_weights(self, init_w):
        self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())
        self.fc2.weight.data = fanin_init(self.fc2.weight.data.size())
        self.fc3.weight.data.uniform_(-init_w, init_w)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
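A hedged parity sketch for the compiled module: call() above pins execution to cuda:0 and asserts exact strides, so this assumes a CUDA device; copying the state dict makes ActorNew share the eager Actor's weights, after which the two forwards should agree to float32 tolerance. The check is mine, not part of the dataset entry:

import torch

eager = Actor(4, 4).cuda()
compiled = ActorNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the same fc1/fc2/fc3 weights
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)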
JackYangzg/pytorch-ddpg
Actor
false
5,389
[ "Apache-2.0" ]
1
96838a40dd6992a0a18065a5edafbefc6bb0ac69
https://github.com/JackYangzg/pytorch-ddpg/tree/96838a40dd6992a0a18065a5edafbefc6bb0ac69
import torch
import numpy as np
import torch.nn as nn


def fanin_init(size, fanin=None):
    fanin = fanin or size[0]
    v = 1.0 / np.sqrt(fanin)
    return torch.Tensor(size).uniform_(-v, v)


class Model(nn.Module):

    def __init__(self, nb_states, nb_actions, hidden1=400, hidden2=300,
            init_w=0.003):
        super().__init__()
        self.fc1 = nn.Linear(nb_states, hidden1)
        self.fc2 = nn.Linear(hidden1, hidden2)
        self.fc3 = nn.Linear(hidden2, nb_actions)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.init_weights(init_w)

    def init_weights(self, init_w):
        self.fc1.weight.data = fanin_init(self.fc1.weight.data.size())
        self.fc2.weight.data = fanin_init(self.fc2.weight.data.size())
        self.fc3.weight.data.uniform_(-init_w, init_w)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        out = self.tanh(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
MuSigmaEncoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # hidden => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hh/chhqj73wauyyzrvkvg6ib7lh5drbg7bq7ddygbqpehxytxvjdtkd.py # Topologically Sorted Source Nodes: [sigmoid, mul, sigma], Original ATen: [aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # sigma => add # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 0.9), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.1), kwargs = {}) triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tmp2 = 0.9 tmp3 = tmp1 * tmp2 tmp4 = 0.1 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], 
Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, mul, sigma], Original ATen: [aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_mul_sigmoid_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_6, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MuSigmaEncoder(nn.Module):
    """
    Maps a representation r to mu and sigma which will define the normal
    distribution from which we sample the latent variable z.

    Parameters
    ----------
    r_dim : int
        Dimension of output representation r.

    z_dim : int
        Dimension of latent variable z.
    """

    def __init__(self, r_dim, z_dim):
        super(MuSigmaEncoder, self).__init__()
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.r_to_hidden = nn.Linear(r_dim, r_dim)
        self.hidden_to_mu = nn.Linear(r_dim, z_dim)
        self.hidden_to_sigma = nn.Linear(r_dim, z_dim)

    def forward(self, r):
        """
        r : torch.Tensor
            Shape (batch_size, r_dim)
        """
        hidden = torch.relu(self.r_to_hidden(r))
        mu = self.hidden_to_mu(hidden)
        sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden))
        return mu, sigma


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'r_dim': 4, 'z_dim': 4}]
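Note the sigma head: 0.1 + 0.9 * sigmoid(...) confines the predicted standard deviation to the open interval (0.1, 1.0), keeping it bounded away from zero. A quick check of that bound (my sketch, assuming the MuSigmaEncoder definition above is in scope; any leading batch shape works because the linear layers act on the last dimension):

import torch

enc = MuSigmaEncoder(r_dim=4, z_dim=4)
mu, sigma = enc(torch.rand(8, 4))
assert mu.shape == sigma.shape == (8, 4)
assert sigma.min() > 0.1 and sigma.max() < 1.0  # sigmoid is strictly inside (0, 1)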
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp2 = 0.9
    tmp3 = tmp1 * tmp2
    tmp4 = 0.1
    tmp5 = tmp3 + tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf3, buf4, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4,
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_6,
        primals_4, buf5)


class MuSigmaEncoderNew(nn.Module):
    """
    Maps a representation r to mu and sigma which will define the normal
    distribution from which we sample the latent variable z.

    Parameters
    ----------
    r_dim : int
        Dimension of output representation r.

    z_dim : int
        Dimension of latent variable z.
    """

    def __init__(self, r_dim, z_dim):
        super(MuSigmaEncoderNew, self).__init__()
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.r_to_hidden = nn.Linear(r_dim, r_dim)
        self.hidden_to_mu = nn.Linear(r_dim, z_dim)
        self.hidden_to_sigma = nn.Linear(r_dim, z_dim)

    def forward(self, input_0):
        primals_1 = self.r_to_hidden.weight
        primals_2 = self.r_to_hidden.bias
        primals_4 = self.hidden_to_mu.weight
        primals_5 = self.hidden_to_mu.bias
        primals_6 = self.hidden_to_sigma.weight
        primals_7 = self.hidden_to_sigma.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1]
JeremyAlain/meta_learning_pacoh
MuSigmaEncoder
false
5,390
[ "MIT" ]
1
b4c2c37d9715e74542bab556ac1f5d778cc3409c
https://github.com/JeremyAlain/meta_learning_pacoh/tree/b4c2c37d9715e74542bab556ac1f5d778cc3409c
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Maps a representation r to mu and sigma which will define the normal
    distribution from which we sample the latent variable z.

    Parameters
    ----------
    r_dim : int
        Dimension of output representation r.

    z_dim : int
        Dimension of latent variable z.
    """

    def __init__(self, r_dim, z_dim):
        super().__init__()
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.r_to_hidden = nn.Linear(r_dim, r_dim)
        self.hidden_to_mu = nn.Linear(r_dim, z_dim)
        self.hidden_to_sigma = nn.Linear(r_dim, z_dim)

    def forward(self, r):
        """
        r : torch.Tensor
            Shape (batch_size, r_dim)
        """
        hidden = torch.relu(self.r_to_hidden(r))
        mu = self.hidden_to_mu(hidden)
        sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden))
        return mu, sigma


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
SmallConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/q7/cq7qwv755rskgi3fxmqbrnzfm6sxg6uprg2cozcqvgaiyr3e5jdv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lo/clouxqhuqmboteb7g6sjkd7wz3mv37jkemznfw6gvoz6ecgjmejm.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_2 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (8, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (8, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (2, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_7, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 512, grid=grid(512), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 2, 1, 1), (2, 1, 1, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 8, grid=grid(8), stream=stream0) del primals_7 return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, ) def 
benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from numpy import prod


class SmallConvNet(nn.Module):
    """
    A network with three conv layers. This is used for testing convolution
    layers for activation count.
    """

    def __init__(self, input_dim: 'int') -> None:
        super(SmallConvNet, self).__init__()
        conv_dim1 = 8
        conv_dim2 = 4
        conv_dim3 = 2
        self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)
        self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)
        self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x

    def get_gt_activation(self, x: 'torch.Tensor') -> int:
        count = 0
        x = self.conv1(x)
        count += prod(list(x.size()))
        x = self.conv2(x)
        count += prod(list(x.size()))
        x = self.conv3(x)
        count += prod(list(x.size()))
        return count


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4}]
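For the (4, 4, 4, 4) input from get_inputs, the count returned by get_gt_activation can be worked out by hand; the intermediate shapes below are the same ones the compiled wrapper checks with assert_size_stride. The arithmetic is mine, assuming the SmallConvNet definition above is in scope:

import torch

net = SmallConvNet(input_dim=4)
# conv1: 1x1 kernel, stride 1 -> (4, 8, 4, 4) = 512 elements
# conv2: 1x1 kernel, stride 2 -> (4, 4, 2, 2) =  64 elements
# conv3: 1x1 kernel, stride 2 -> (4, 2, 1, 1) =   8 elements
assert net.get_gt_activation(torch.rand(4, 4, 4, 4)) == 512 + 64 + 8  # = 584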
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from numpy import prod

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 8
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (8, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (2, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_7, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(512)](buf1, primals_2, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 2, 2), (16, 4, 2, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(64)](buf3, primals_5, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 2, 1, 1), (2, 1, 1, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_2[grid(8)](buf5, primals_7, 8,
            XBLOCK=8, num_warps=1, num_stages=1)
        del primals_7
    return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3


class SmallConvNetNew(nn.Module):
    """
    A network with three conv layers. This is used for testing convolution
    layers for activation count.
    """

    def __init__(self, input_dim: 'int') -> None:
        super(SmallConvNetNew, self).__init__()
        conv_dim1 = 8
        conv_dim2 = 4
        conv_dim3 = 2
        self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)
        self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)
        self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)

    def get_gt_activation(self, x: 'torch.Tensor') -> int:
        count = 0
        x = self.conv1(x)
        count += prod(list(x.size()))
        x = self.conv2(x)
        count += prod(list(x.size()))
        x = self.conv3(x)
        count += prod(list(x.size()))
        return count

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
Jennifer-Rigdon/fvcore
SmallConvNet
false
5,391
[ "Apache-2.0" ]
1
7e800a86f2df93da017e07380543b4060ab88c94
https://github.com/Jennifer-Rigdon/fvcore/tree/7e800a86f2df93da017e07380543b4060ab88c94
import torch
import torch.nn as nn
from numpy import prod


class Model(nn.Module):
    """
    A network with three conv layers. This is used for testing convolution
    layers for activation count.
    """

    def __init__(self, input_dim: 'int') -> None:
        super().__init__()
        conv_dim1 = 8
        conv_dim2 = 4
        conv_dim3 = 2
        self.conv1 = nn.Conv2d(input_dim, conv_dim1, 1, 1)
        self.conv2 = nn.Conv2d(conv_dim1, conv_dim2, 1, 2)
        self.conv3 = nn.Conv2d(conv_dim2, conv_dim3, 1, 2)

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x

    def get_gt_activation(self, x: 'torch.Tensor') -> int:
        count = 0
        x = self.conv1(x)
        count += prod(list(x.size()))
        x = self.conv2(x)
        count += prod(list(x.size()))
        x = self.conv3(x)
        count += prod(list(x.size()))
        return count


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
Encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ew/cewggrlrjef57abtf6el5udfhzggviurshs4m5fekyv42qhvvt7h.py # Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # mixture_w => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + 
(x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 1), (4, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 1), (1, 1), torch.bool) # Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 4, grid=grid(4), stream=stream0) return (buf1, primals_1, reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.hub
import torch.nn.functional as F


class Encoder(nn.Module):
    """Estimation of the nonnegative mixture weight by a 1-D conv layer."""

    def __init__(self, L, N, audio_channels):
        super(Encoder, self).__init__()
        self.L, self.N = L, N
        self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L,
            stride=L // 2, bias=False)

    def forward(self, mixture):
        """
        Args:
            mixture: [M, T], M is batch size, T is #samples
        Returns:
            mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1
        """
        mixture_w = F.relu(self.conv1d_U(mixture))
        return mixture_w


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'L': 4, 'N': 4, 'audio_channels': 4}]
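The docstring's formula can be sanity-checked with the sizes used in this entry: T = 4 samples, L = 4, and stride L // 2 = 2 give K = (T - L) / (L / 2) + 1 = 1 = 2T / L - 1, matching the (4, 1) buffer the compiled wrapper returns. A small check (mine, not from the source; get_inputs feeds an unbatched (channels, samples) tensor, so this assumes a PyTorch version whose Conv1d accepts unbatched input):

import torch

L = N = C = T = 4
enc = Encoder(L=L, N=N, audio_channels=C)
out = enc(torch.rand(C, T))          # unbatched (C, T) input
K = (T - L) // (L // 2) + 1          # = 2 * T // L - 1 = 1
assert out.shape == (N, K)
assert (out >= 0).all()              # ReLU keeps the mixture weights nonnegative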
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.hub

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
            4, 4), (16, 4, 1), 0), primals_1, stride=(2,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf0, (1, 4, 1), (4, 1, 1))
        buf1 = reinterpret_tensor(buf0, (4, 1), (1, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 1), (1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(4)](buf1, buf2, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
    return buf1, primals_1, reinterpret_tensor(primals_2, (1, 4, 4), (16, 4,
        1), 0), buf2


class EncoderNew(nn.Module):
    """Estimation of the nonnegative mixture weight by a 1-D conv layer."""

    def __init__(self, L, N, audio_channels):
        super(EncoderNew, self).__init__()
        self.L, self.N = L, N
        self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L,
            stride=L // 2, bias=False)

    def forward(self, input_0):
        primals_1 = self.conv1d_U.weight
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
JavierCane/demucs
Encoder
false
5392
[ "MIT" ]
1
01d14844a71be7b5d86adf06a8501a951157c3fe
https://github.com/JavierCane/demucs/tree/01d14844a71be7b5d86adf06a8501a951157c3fe
import torch from torch import nn import torch.hub import torch.nn.functional as F class Model(nn.Module): """Estimation of the nonnegative mixture weight by a 1-D conv layer. """ def __init__(self, L, N, audio_channels): super().__init__() self.L, self.N = L, N self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L, stride= L // 2, bias=False) def forward(self, mixture): """ Args: mixture: [M, T], M is batch size, T is #samples Returns: mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1 """ mixture_w = F.relu(self.conv1d_U(mixture)) return mixture_w def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4, 4, 4]
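A quick way to sanity-check this row is to run the eager Encoder (from the python_code field) against the compiled EncoderNew (from the triton_code field). The following is a minimal sketch, assuming both classes are already defined in scope and a CUDA device is available; the tolerance is an illustrative choice, and the unbatched (channels, time) input layout produced by get_inputs() requires a recent PyTorch version of nn.Conv1d.

import torch

# Instantiate both variants with this row's init inputs: L=4, N=4, audio_channels=4.
eager = Encoder(L=4, N=4, audio_channels=4).cuda()
compiled = EncoderNew(L=4, N=4, audio_channels=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share the single conv1d_U weight

x = torch.rand(4, 4, device='cuda')  # same shape as get_inputs()
with torch.no_grad():
    ref = eager(x)     # F.relu(conv1d(x)) on the eager path
    out = compiled(x)  # extern conv + fused ReLU Triton kernel
print(ref.shape)                            # torch.Size([4, 1])
print(torch.allclose(ref, out, atol=1e-6))  # expected: True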
DeConv2dBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yb/cybpjmzbifjzzgcvo732lai5metqpn6lv5qgv2usa3v2ykeb3mq7.py # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.convolution, aten.silu] # Source node to ATen node mapping: # x => convolution # x_2 => mul, sigmoid # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) triton_poi_fused_convolution_silu_0 = async_compile.triton('triton_poi_fused_convolution_silu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_silu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_silu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 36) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4f/c4f4mu3fy43wvkdpljn3idkt2qjt7pkweg7fpd4ialsrbzpnr46o.py # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.silu] # Source node to ATen node mapping: # x_3 => convolution_1 # x_4 => mul_1, sigmoid_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {}) # %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %sigmoid_1), kwargs = {}) triton_poi_fused_convolution_silu_1 = async_compile.triton('triton_poi_fused_convolution_silu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_silu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_silu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 144) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) 
assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6, 6), (144, 36, 6, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_2], Original ATen: [aten.convolution, aten.silu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_silu_0.run(buf1, primals_2, buf2, 576, grid=grid(576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 12, 12), (576, 144, 12, 1)) buf4 = buf3; del buf3 # reuse buf5 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.silu] triton_poi_fused_convolution_silu_1.run(buf4, primals_5, buf5, 2304, grid=grid(2304), stream=stream0) del primals_5 return (buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class DeConv2dBlock(nn.Module): """ Similar to a LeNet block 4x upsampling, dimension hard-coded """ def __init__(self, in_dim: 'int', hidden_dim: 'int', out_dim: 'int', stride: 'int'=2, kernel_size: 'int'=3, padding: 'int'=2, output_padding: 'int'=1, dropout=0.1, activation_type='silu', debug =False): super(DeConv2dBlock, self).__init__() padding1 = padding // 2 if padding // 2 >= 1 else 1 self.deconv0 = nn.ConvTranspose2d(in_channels=in_dim, out_channels= hidden_dim, kernel_size=kernel_size, stride=stride, output_padding=output_padding, padding=padding) self.deconv1 = nn.ConvTranspose2d(in_channels=hidden_dim, out_channels=out_dim, kernel_size=kernel_size, stride=stride, output_padding=output_padding, padding=padding1) self.activation = nn.SiLU() if activation_type == 'silu' else nn.ReLU() self.dropout = nn.Dropout(dropout) self.debug = debug def forward(self, x): x = self.deconv0(x) x = self.dropout(x) x = self.activation(x) x = self.deconv1(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'hidden_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_silu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 36 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_silu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 144 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 6, 6), (144, 36, 6, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_silu_0[grid(576)](buf1, primals_2, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 12, 12), (576, 144, 12, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch. 
float32) triton_poi_fused_convolution_silu_1[grid(2304)](buf4, primals_5, buf5, 2304, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf5, primals_1, primals_3, primals_4, buf1, buf2, buf4 class DeConv2dBlockNew(nn.Module): """ Similar to a LeNet block 4x upsampling, dimension hard-coded """ def __init__(self, in_dim: 'int', hidden_dim: 'int', out_dim: 'int', stride: 'int'=2, kernel_size: 'int'=3, padding: 'int'=2, output_padding: 'int'=1, dropout=0.1, activation_type='silu', debug =False): super(DeConv2dBlockNew, self).__init__() padding1 = padding // 2 if padding // 2 >= 1 else 1 self.deconv0 = nn.ConvTranspose2d(in_channels=in_dim, out_channels= hidden_dim, kernel_size=kernel_size, stride=stride, output_padding=output_padding, padding=padding) self.deconv1 = nn.ConvTranspose2d(in_channels=hidden_dim, out_channels=out_dim, kernel_size=kernel_size, stride=stride, output_padding=output_padding, padding=padding1) self.activation = nn.SiLU() if activation_type == 'silu' else nn.ReLU() self.dropout = nn.Dropout(dropout) self.debug = debug def forward(self, input_0): primals_1 = self.deconv0.weight primals_2 = self.deconv0.bias primals_4 = self.deconv1.weight primals_5 = self.deconv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Jimmy-INL/fourier-transformer
DeConv2dBlock
false
5393
[ "MIT" ]
1
44a6ebc68aef24a4eb9aaa2a8c518ede56ec47ce
https://github.com/Jimmy-INL/fourier-transformer/tree/44a6ebc68aef24a4eb9aaa2a8c518ede56ec47ce
import torch from torch import nn class Model(nn.Module): """ Similar to a LeNet block 4x upsampling, dimension hard-coded """ def __init__(self, in_dim: 'int', hidden_dim: 'int', out_dim: 'int', stride: 'int'=2, kernel_size: 'int'=3, padding: 'int'=2, output_padding: 'int'=1, dropout=0.1, activation_type='silu', debug =False): super().__init__() padding1 = padding // 2 if padding // 2 >= 1 else 1 self.deconv0 = nn.ConvTranspose2d(in_channels=in_dim, out_channels= hidden_dim, kernel_size=kernel_size, stride=stride, output_padding=output_padding, padding=padding) self.deconv1 = nn.ConvTranspose2d(in_channels=hidden_dim, out_channels=out_dim, kernel_size=kernel_size, stride=stride, output_padding=output_padding, padding=padding1) self.activation = nn.SiLU() if activation_type == 'silu' else nn.ReLU() self.dropout = nn.Dropout(dropout) self.debug = debug def forward(self, x): x = self.deconv0(x) x = self.dropout(x) x = self.activation(x) x = self.deconv1(x) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
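The two assert_size_stride calls in call() pin down the upsampling path: (4, 4, 4, 4) -> (4, 4, 6, 6) after deconv0 and -> (4, 4, 12, 12) after deconv1, following H_out = (H_in - 1) * stride - 2 * padding + kernel_size + output_padding. Below is a minimal sketch, assuming the classes above are in scope and a CUDA device is available; eval() keeps the eager dropout inactive so the two paths are comparable, since the compiled graph carries no dropout.

import torch

eager = DeConv2dBlock(in_dim=4, hidden_dim=4, out_dim=4).cuda().eval()
compiled = DeConv2dBlockNew(in_dim=4, hidden_dim=4, out_dim=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # share deconv0/deconv1 weights and biases

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
print(out.shape)                            # torch.Size([4, 4, 12, 12])
print(torch.allclose(ref, out, atol=1e-5))  # expected: True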
DQN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5y/c5yq7wkgmmcygrawripwacy566sggsmh2mzk5izw35wk7ferohhu.py # Topologically Sorted Source Nodes: [state], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # state => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = 
xindex % 100 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 100), (100, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [state], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 6400, grid=grid(6400), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf3) del primals_7 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), primals_6, primals_4, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class DQN(nn.Module): """A simple deep Q network implementation. Computes Q values for each (action, object) tuple given an input state vector """ def __init__(self, state_dim, action_dim, object_dim, hidden_size=100): super(DQN, self).__init__() self.state_encoder = nn.Linear(state_dim, hidden_size) self.state2action = nn.Linear(hidden_size, action_dim) self.state2object = nn.Linear(hidden_size, object_dim) def forward(self, x): state = F.relu(self.state_encoder(x)) return self.state2action(state), self.state2object(state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4, 'object_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 100), (100, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1, primals_2, buf4, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf3) del primals_7 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0 ), primals_6, primals_4, buf4 class DQNNew(nn.Module): """A simple deep Q network implementation. 
Computes Q values for each (action, object) tuple given an input state vector """ def __init__(self, state_dim, action_dim, object_dim, hidden_size=100): super(DQNNew, self).__init__() self.state_encoder = nn.Linear(state_dim, hidden_size) self.state2action = nn.Linear(hidden_size, action_dim) self.state2object = nn.Linear(hidden_size, object_dim) def forward(self, input_0): primals_1 = self.state_encoder.weight primals_2 = self.state_encoder.bias primals_4 = self.state2action.weight primals_5 = self.state2action.bias primals_6 = self.state2object.weight primals_7 = self.state2object.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
Jerimat/MITx-6.86-MachineLearning_EdX
DQN
false
5394
[ "MIT" ]
1
e454e0646cd923d689d3946ea2ff3432dec920ac
https://github.com/Jerimat/MITx-6.86-MachineLearning_EdX/tree/e454e0646cd923d689d3946ea2ff3432dec920ac
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): """A simple deep Q network implementation. Computes Q values for each (action, object) tuple given an input state vector """ def __init__(self, state_dim, action_dim, object_dim, hidden_size=100): super().__init__() self.state_encoder = nn.Linear(state_dim, hidden_size) self.state2action = nn.Linear(hidden_size, action_dim) self.state2object = nn.Linear(hidden_size, object_dim) def forward(self, x): state = F.relu(self.state_encoder(x)) return self.state2action(state), self.state2object(state) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
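Unlike most rows, this forward returns a pair: DQNNew hands back (output[0], output[1]), mirroring the eager network's (action, object) Q-value tuple. A minimal sketch of the equivalence check, assuming the classes above are in scope and a CUDA device is available; the tolerance is illustrative:

import torch

eager = DQN(state_dim=4, action_dim=4, object_dim=4).cuda()
compiled = DQNNew(state_dim=4, action_dim=4, object_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())

state = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    q_action_ref, q_object_ref = eager(state)
    q_action, q_object = compiled(state)
print(q_action.shape, q_object.shape)  # both torch.Size([4, 4, 4, 4])
print(torch.allclose(q_action_ref, q_action, atol=1e-5),
      torch.allclose(q_object_ref, q_object, atol=1e-5))  # expected: True True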
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) 
x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_5 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 4), (4, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256, ), (1, )) assert_size_stride(primals_6, (4, 256), (256, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del 
buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 16384, grid=grid(16384), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 16384, grid=grid(16384), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class Actor(nn.Module): def __init__(self, state_dim: 'int', action_dim: 'int'): """ Initialize the network param: state_dim : Size of the state space param: action_dim: Size of the action space """ super(Actor, self).__init__() hidden_dim_1 = 256 hidden_dim_2 = 256 self.fc1 = nn.Linear(state_dim, hidden_dim_1) self.fc2 = nn.Linear(hidden_dim_1, hidden_dim_2) self.fc4 = nn.Linear(hidden_dim_2, action_dim) self.fc1.weight.data.uniform_(-1 / np.sqrt(state_dim), 1 / np.sqrt( state_dim)) self.fc2.weight.data.uniform_(-1 / np.sqrt(hidden_dim_1), 1 / np. sqrt(hidden_dim_1)) self.fc4.weight.data.uniform_(-0.003, 0.003) def forward(self, state: 'torch.Tensor') ->torch.Tensor: """ Define the forward pass param: state: The state of the environment """ x = state x = self.fc1(x) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc4(x) x = torch.tanh(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 4), (4, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (4, 256), (256, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_3, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3, primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), 
reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), reinterpret_tensor(buf3, (64, 256), (256, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 class ActorNew(nn.Module): def __init__(self, state_dim: 'int', action_dim: 'int'): """ Initialize the network param: state_dim : Size of the state space param: action_dim: Size of the action space """ super(ActorNew, self).__init__() hidden_dim_1 = 256 hidden_dim_2 = 256 self.fc1 = nn.Linear(state_dim, hidden_dim_1) self.fc2 = nn.Linear(hidden_dim_1, hidden_dim_2) self.fc4 = nn.Linear(hidden_dim_2, action_dim) self.fc1.weight.data.uniform_(-1 / np.sqrt(state_dim), 1 / np.sqrt( state_dim)) self.fc2.weight.data.uniform_(-1 / np.sqrt(hidden_dim_1), 1 / np. sqrt(hidden_dim_1)) self.fc4.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc4.weight primals_7 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
JiangengDong/ECE276C
Actor
false
5395
[ "MIT" ]
1
2338b5226d6fed8858402e8d67db7f2eead98221
https://github.com/JiangengDong/ECE276C/tree/2338b5226d6fed8858402e8d67db7f2eead98221
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, state_dim: 'int', action_dim: 'int'): """ Initialize the network param: state_dim : Size of the state space param: action_dim: Size of the action space """ super().__init__() hidden_dim_1 = 256 hidden_dim_2 = 256 self.fc1 = nn.Linear(state_dim, hidden_dim_1) self.fc2 = nn.Linear(hidden_dim_1, hidden_dim_2) self.fc4 = nn.Linear(hidden_dim_2, action_dim) self.fc1.weight.data.uniform_(-1 / np.sqrt(state_dim), 1 / np.sqrt( state_dim)) self.fc2.weight.data.uniform_(-1 / np.sqrt(hidden_dim_1), 1 / np. sqrt(hidden_dim_1)) self.fc4.weight.data.uniform_(-0.003, 0.003) def forward(self, state: 'torch.Tensor') ->torch.Tensor: """ Define the forward pass param: state: The state of the environment """ x = state x = self.fc1(x) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc4(x) x = torch.tanh(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
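Because the output head is followed by tanh (the triton_poi_fused_tanh_1 kernel), every entry of the compiled actor's output lies in (-1, 1). A minimal sketch, assuming the classes above are in scope and a CUDA device is available; the custom uniform weight init runs in both constructors, so copying the state dict makes the comparison exact up to floating point:

import torch

eager = Actor(state_dim=4, action_dim=4).cuda()
compiled = ActorNew(state_dim=4, action_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # share fc1/fc2/fc4 parameters

state = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    a_ref = eager(state)
    a = compiled(state)
print(a.shape)                                         # torch.Size([4, 4, 4, 4])
print(float(a.min()) > -1.0 and float(a.max()) < 1.0)  # tanh bound: True
print(torch.allclose(a_ref, a, atol=1e-5))             # expected: True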
FreqUpsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2r/c2rjiuvqj5zfl55v7snvova3mhqw3tzjdcdl43qazfcjdkbiif3a.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # interpolate => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %unsqueeze, %convert_element_type_3]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 
= tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp9 = tmp8.to(tl.int32) tmp10 = tl.load(in_ptr0 + (tmp9 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from torch import nn from torch.nn import functional as F class FreqUpsample(nn.Module): def __init__(self, factor: 'int', mode='nearest'): super().__init__() self.f = float(factor) self.mode = mode def forward(self, x: 'Tensor') ->Tensor: return F.interpolate(x, scale_factor=[1.0, self.f], mode=self.mode) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'factor': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp9 = tmp8.to(tl.int32) tmp10 = tl.load(in_ptr0 + (tmp9 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FreqUpsampleNew(nn.Module): def __init__(self, factor: 'int', mode='nearest'): super().__init__() self.f = float(factor) self.mode = mode def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JinmingChe/DeepFilterNet
FreqUpsample
false
5396
[ "ECL-2.0", "Apache-2.0", "MIT" ]
1
0e35a24c33c091b4c34afb3599f2945bf5e87adf
https://github.com/JinmingChe/DeepFilterNet/tree/0e35a24c33c091b4c34afb3599f2945bf5e87adf
import torch from torch import Tensor from torch import nn from torch.nn import functional as F class Model(nn.Module): def __init__(self, factor: 'int', mode='nearest'): super().__init__() self.f = float(factor) self.mode = mode def forward(self, x: 'Tensor') ->Tensor: return F.interpolate(x, scale_factor=[1.0, self.f], mode=self.mode) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
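With mode='nearest' and scale_factor=[1.0, 4.0], the kernel's index math (tmp9 = int(x0 * 0.25)) reduces to repeating each frequency bin four times, so the result can be checked exactly against repeat_interleave. A minimal sketch, assuming FreqUpsampleNew is in scope and a CUDA device is available (the module itself has no parameters):

import torch

up = FreqUpsampleNew(factor=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = up(x)
print(y.shape)  # torch.Size([4, 4, 4, 16]): only the last (frequency) axis grows
# Nearest-neighbor along one axis with an integer factor just repeats columns,
# and since the kernel only gathers (no arithmetic on values), equality is exact.
print(torch.equal(y, x.repeat_interleave(4, dim=-1)))  # expected: True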
ResidualBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] # Source node to ATen node mapping: # out => _unsafe_index, _unsafe_index_1 # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py # Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit] # Source node to ATen node mapping: # instance_norm => add, rsqrt, var_mean # out_1 => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 
tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp23, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3g/c3gbhm3y6wldudvsxdmmjh5ssg2uys5qqk3dd3k7bxnuot4xhndp.py # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] # Source node to ATen node mapping: # instance_norm => repeat # Graph fragment: # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {}) triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6x/c6xlvvnj6ftmp7jka4547n3hpffcz5xr3op3wtbpv5povsb6rjue.py # Topologically Sorted Source Nodes: [x, out_2], Original ATen: [aten.relu, aten.reflection_pad2d] # Source node to ATen node mapping: # out_2 => _unsafe_index_2, _unsafe_index_3 # x => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {}) # %_unsafe_index_3 : [num_users=2] = 
call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {}) triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) % 6 x2 = (xindex // 36) x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/f6/cf6z47jjg5avrtu2dn7ifr4mx5pcq57zzoksapbtrbrglr3gqnxi.py # Topologically Sorted Source Nodes: [out_3, x_1, x_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add] # Source node to ATen node mapping: # out_3 => convolution_1 # x_1 => add_2, repeat_2, rsqrt_1, var_mean_1 # x_2 => add_4 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, 
keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {}) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + (r3 + (16*x0)), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 16, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 16.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * 
tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask) tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp25, xmask) tl.store(out_ptr1 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d] stream0 = get_raw_stream(0) triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1; del buf1 # reuse buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 16, 16, grid=grid(16), stream=stream0) del primals_3 buf3 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0) del primals_4 buf4 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0) del primals_5 buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [x, out_2], Original ATen: [aten.relu, aten.reflection_pad2d] triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 576, grid=grid(576), stream=stream0) # Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((16, ), (1, ), torch.float32) buf11 = buf10; del buf10 # reuse buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_3, x_1, x_2], Original ATen: [aten.convolution, 
aten.repeat, aten._native_batch_norm_legit, aten.add] triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4.run(buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, grid=grid(16), stream=stream0) del primals_1 del primals_7 del primals_8 del primals_9 return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np import torch.nn as nn class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = int(np.floor(kernel_size / 2)) self.reflection_pad = nn.ReflectionPad2d(reflection_padding) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(nn.Module): """ ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, num_channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(num_channels, num_channels, kernel_size=3, stride=1) self.instance1 = nn.InstanceNorm2d(num_channels, affine=True) self.conv2 = ConvLayer(num_channels, num_channels, kernel_size=3, stride=1) self.instance2 = nn.InstanceNorm2d(num_channels, affine=True) self.relu = nn.ReLU() def forward(self, x): residual = x x = self.relu(self.instance1(self.conv1(x))) x = self.instance2(self.conv2(x)) x += residual return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tl.where(xmask, tmp3, 0) tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp3 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp18 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp23, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 4, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tmp9 = tl.full([1], 0, tl.int32) tmp10 = 
triton_helpers.maximum(tmp9, tmp8) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0) tmp3 = tmp1 + tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tl.where(xmask, tmp4, 0) tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.full([XBLOCK, 1], 16, tl.int32) tmp12 = tmp11.to(tl.float32) tmp13 = tmp10 / tmp12 tmp14 = tmp4 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tmp3 - tmp13 tmp21 = 16.0 tmp22 = tmp19 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.rsqrt(tmp24) tmp26 = tmp20 * tmp25 tmp27 = tmp26 * tmp0 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp25, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32) buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0) del buf6 triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2, buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf9 = empty_strided_cuda((4, 4, 6, 
6), (144, 36, 6, 1), torch.float32) triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5, buf8, buf3, buf4, buf9, 576, XBLOCK=128, num_warps=4, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf12 = empty_strided_cuda((16,), (1,), torch.float32) buf11 = buf10 del buf10 buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch. float32) triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid (16)](buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 del primals_7 del primals_8 del primals_9 return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0)) class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = int(np.floor(kernel_size / 2)) self.reflection_pad = nn.ReflectionPad2d(reflection_padding) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlockNew(nn.Module): """ ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, num_channels): super(ResidualBlockNew, self).__init__() self.conv1 = ConvLayer(num_channels, num_channels, kernel_size=3, stride=1) self.instance1 = nn.InstanceNorm2d(num_channels, affine=True) self.conv2 = ConvLayer(num_channels, num_channels, kernel_size=3, stride=1) self.instance2 = nn.InstanceNorm2d(num_channels, affine=True) self.relu = nn.ReLU() def forward(self, input_0): primals_2 = self.conv1.conv2d.weight primals_3 = self.conv1.conv2d.bias primals_4 = self.instance1.weight primals_5 = self.instance1.bias primals_6 = self.conv2.conv2d.weight primals_7 = self.conv2.conv2d.bias primals_8 = self.instance2.weight primals_9 = self.instance2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
IvoryCandy/neural-style
ResidualBlock
false
5,397
[ "Apache-2.0" ]
1
d9d73676479e36c1cbd6c9af36d857f80099504b
https://github.com/IvoryCandy/neural-style/tree/d9d73676479e36c1cbd6c9af36d857f80099504b
import torch import numpy as np import torch.nn as nn class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super().__init__() reflection_padding = int(np.floor(kernel_size / 2)) self.reflection_pad = nn.ReflectionPad2d(reflection_padding) self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class Model(nn.Module): """ ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, num_channels): super().__init__() self.conv1 = ConvLayer(num_channels, num_channels, kernel_size=3, stride=1) self.instance1 = nn.InstanceNorm2d(num_channels, affine=True) self.conv2 = ConvLayer(num_channels, num_channels, kernel_size=3, stride=1) self.instance2 = nn.InstanceNorm2d(num_channels, affine=True) self.relu = nn.ReLU() def forward(self, x): residual = x x = self.relu(self.instance1(self.conv1(x))) x = self.instance2(self.conv2(x)) x += residual return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
SoftDetectionModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/72/c72xsm4j2ydy65cd6s2dqcf73hjvkqbi2lraxbqgd3vljynafcgn.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => max_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {}) triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) 
tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yh/cyhmw3wd3ttwv3gwn5pdrawto4dhpm5r565oikeygp4rncuwxbr2.py # Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd] # Source node to ATen node mapping: # batch => relu # exp => exp # pad => constant_pad_nd # truediv => div # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {}) triton_poi_fused_constant_pad_nd_div_exp_relu_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_div_exp_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_div_exp_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x4 = (xindex // 36) x3 = (xindex // 144) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x4)), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + (x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.full(tmp16.shape, 1.0, 
tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tl.store(out_ptr0 + (x6), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/av/cavduzqn2kcecsivfbdoiquab64kxakphvxa2ir5glbkv2uxr56x.py # Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # batch => relu # exp => exp # pad => constant_pad_nd # truediv => div # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + 
(36*x2)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tl.store(out_ptr0 + (x3), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/6v/c6v2dq4j27mrmzkudmd2l2ookfsao2o6sqtsagdumpcc33lwpchn.py # Topologically Sorted Source Nodes: [batch, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, score_1], Original ATen: [aten.relu, aten.div, aten.exp, aten.mul, aten.max, aten.sum] # Source node to ATen node mapping: # all_scores => mul_1 # batch => relu # depth_wise_max_score => div_2 # exp => exp # local_max_score => div_1 # max_3 => max_3 # score_1 => div_3 # sum_1 => sum_1 # sum_exp => mul # truediv => div # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 9), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %mul), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %unsqueeze), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_2), kwargs = {}) # %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul_1, 1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_2, [1]), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%getitem_4, %view_3), kwargs = {}) triton_per_fused_div_exp_max_mul_relu_sum_3 = async_compile.triton('triton_per_fused_div_exp_max_mul_relu_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_max_mul_relu_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False} ) @triton.jit def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (r1 + (64*x0)), xmask, other=0.0) tmp10 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp16 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp23 = tl.load(in_ptr2 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp31 = tl.load(in_ptr2 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp39 = tl.load(in_ptr2 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 / tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = 9.0 tmp8 = tmp6 * tmp7 tmp9 = tmp5 / tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = triton_helpers.maximum(tmp2, tmp11) tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp2 / tmp18 tmp20 = tmp9 * tmp19 tmp21 = tmp11 / tmp3 tmp22 = tl_math.exp(tmp21) tmp24 = tmp23 * tmp7 tmp25 = tmp22 / tmp24 tmp26 = tmp11 / tmp18 tmp27 = tmp25 * tmp26 tmp28 = triton_helpers.maximum(tmp20, tmp27) tmp29 = tmp14 / tmp3 tmp30 = tl_math.exp(tmp29) tmp32 = tmp31 * tmp7 tmp33 = tmp30 / tmp32 tmp34 = tmp14 / tmp18 tmp35 = tmp33 * tmp34 tmp36 = triton_helpers.maximum(tmp28, tmp35) tmp37 = tmp17 / tmp3 tmp38 = tl_math.exp(tmp37) tmp40 = tmp39 * tmp7 tmp41 = tmp38 / tmp40 tmp42 = tmp17 / tmp18 tmp43 = tmp41 * tmp42 tmp44 = triton_helpers.maximum(tmp36, tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.where(xmask, tmp45, 0) tmp48 = tl.sum(tmp47, 1)[:, None] tmp49 = tmp44 / tmp48 tl.store(out_ptr2 + (r1 + (16*x0)), tmp49, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_per_fused_max_0.run(arg0_1, buf0, 4, 64, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_div_exp_relu_1.run(arg0_1, buf0, buf2, 576, grid=grid(576), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d] triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) del buf2 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [batch, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, score_1], 
Original ATen: [aten.relu, aten.div, aten.exp, aten.mul, aten.max, aten.sum] triton_per_fused_div_exp_max_mul_relu_sum_3.run(arg0_1, buf0, buf3, buf6, 4, 16, grid=grid(4), stream=stream0) del arg0_1 del buf0 del buf3 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class SoftDetectionModule(nn.Module): def __init__(self, soft_local_max_size=3): super(SoftDetectionModule, self).__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, batch): b = batch.size(0) batch = F.relu(batch) max_per_sample = torch.max(batch.view(b, -1), dim=1)[0] exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1)) sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(F.pad(exp, [ self.pad] * 4, mode='constant', value=1.0), self. soft_local_max_size, stride=1) local_max_score = exp / sum_exp depth_wise_max = torch.max(batch, dim=1)[0] depth_wise_max_score = batch / depth_wise_max.unsqueeze(1) all_scores = local_max_score * depth_wise_max_score score = torch.max(all_scores, dim=1)[0] score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1) return score def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex // 36 x3 = xindex // 144 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + x3, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tl.store(out_ptr0 + x6, tmp18, xmask) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) 
r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0) tmp10 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp16 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp23 = tl.load(in_ptr2 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp31 = tl.load(in_ptr2 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp39 = tl.load(in_ptr2 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 / tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = 9.0 tmp8 = tmp6 * tmp7 tmp9 = tmp5 / tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = triton_helpers.maximum(tmp2, tmp11) tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp2 / tmp18 tmp20 = tmp9 * tmp19 tmp21 = tmp11 / tmp3 tmp22 = tl_math.exp(tmp21) tmp24 = tmp23 * tmp7 tmp25 = tmp22 / tmp24 tmp26 = tmp11 / tmp18 tmp27 = tmp25 * tmp26 tmp28 = triton_helpers.maximum(tmp20, tmp27) tmp29 = tmp14 / tmp3 tmp30 = tl_math.exp(tmp29) tmp32 = tmp31 * tmp7 tmp33 = tmp30 / tmp32 tmp34 = tmp14 / tmp18 tmp35 = tmp33 * tmp34 tmp36 = triton_helpers.maximum(tmp28, tmp35) tmp37 = tmp17 / tmp3 tmp38 = tl_math.exp(tmp37) tmp40 = tmp39 * tmp7 tmp41 = tmp38 / tmp40 tmp42 = tmp17 / tmp18 tmp43 = tmp41 * tmp42 tmp44 = triton_helpers.maximum(tmp36, tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.where(xmask, tmp45, 0) tmp48 = tl.sum(tmp47, 1)[:, None] tmp49 = tmp44 / tmp48 tl.store(out_ptr2 + (r1 + 16 * x0), tmp49, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_max_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_div_exp_relu_1[grid(576)](arg0_1, buf0, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2[grid(256)]( buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf2 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_per_fused_div_exp_max_mul_relu_sum_3[grid(4)](arg0_1, buf0, buf3, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 del buf3 return buf6, class SoftDetectionModuleNew(nn.Module): def __init__(self, soft_local_max_size=3): super(SoftDetectionModuleNew, self).__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JimmyYourHonor/D2-net-fast-ap
SoftDetectionModule
false
5398
[ "BSD-3-Clause-Clear" ]
1
c4c0db23eae3aa4e3079b80b57887b4cb963b1e8
https://github.com/JimmyYourHonor/D2-net-fast-ap/tree/c4c0db23eae3aa4e3079b80b57887b4cb963b1e8
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def __init__(self, soft_local_max_size=3): super().__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, batch): b = batch.size(0) batch = F.relu(batch) max_per_sample = torch.max(batch.view(b, -1), dim=1)[0] exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1)) sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(F.pad(exp, [ self.pad] * 4, mode='constant', value=1.0), self. soft_local_max_size, stride=1) local_max_score = exp / sum_exp depth_wise_max = torch.max(batch, dim=1)[0] depth_wise_max_score = batch / depth_wise_max.unsqueeze(1) all_scores = local_max_score * depth_wise_max_score score = torch.max(all_scores, dim=1)[0] score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1) return score def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
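As a sanity check for this record, the sketch below (not part of the dataset; `check_soft_detection` and the tolerances are illustrative names/values, and a CUDA device is assumed) runs one random input through the eager `Model` above and the Triton-backed `SoftDetectionModuleNew` and compares the results.

import torch

def check_soft_detection(eager_cls, triton_cls, atol=1e-5, rtol=1e-4):
    # Hypothetical helper: same random input through both paths, then compare.
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = eager_cls()(x)   # eager forward from `pytorch_code`
    out = triton_cls()(x)  # fused kernels from `triton_code`
    torch.testing.assert_close(out, ref, atol=atol, rtol=rtol)
    return out.shape       # torch.Size([4, 4, 4]): per-sample score maps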
ConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gv/cgvv46qwn6yizboetp5d3iglxiki6ctifxljoe2gyrvuroc7stxb.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1, 1, 1], [4, 4, 4], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2916 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 729) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 2916, grid=grid(2916), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 9, 9, 9), (729, 81, 9, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ConvNet(nn.Module): """ A network with a single convolution layer. This is used for testing flop count for convolution layers. """ def __init__(self, conv_dim: 'int', input_dim: 'int', output_dim: 'int', kernel_size: 'int', spatial_dim: 'int', stride: 'int', padding: 'int', groups_num: 'int') ->None: super(ConvNet, self).__init__() if conv_dim == 1: convLayer = nn.Conv1d elif conv_dim == 2: convLayer = nn.Conv2d else: convLayer = nn.Conv3d self.conv = convLayer(input_dim, output_dim, kernel_size, stride, padding, groups=groups_num) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'conv_dim': 4, 'input_dim': 4, 'output_dim': 4, 'kernel_size': 4, 'spatial_dim': 4, 'stride': 1, 'padding': 4, 'groups_num': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 2916 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 729 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(4, 4, 4), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 9, 9, 9), (2916, 729, 81, 9, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(2916)](buf1, primals_2, 2916, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 9, 9, 9), (729, 81, 9, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class ConvNetNew(nn.Module): """ A network with a single convolution layer. This is used for testing flop count for convolution layers. """ def __init__(self, conv_dim: 'int', input_dim: 'int', output_dim: 'int', kernel_size: 'int', spatial_dim: 'int', stride: 'int', padding: 'int', groups_num: 'int') ->None: super(ConvNetNew, self).__init__() if conv_dim == 1: convLayer = nn.Conv1d elif conv_dim == 2: convLayer = nn.Conv2d else: convLayer = nn.Conv3d self.conv = convLayer(input_dim, output_dim, kernel_size, stride, padding, groups=groups_num) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Jennifer-Rigdon/fvcore
ConvNet
false
5399
[ "Apache-2.0" ]
1
7e800a86f2df93da017e07380543b4060ab88c94
https://github.com/Jennifer-Rigdon/fvcore/tree/7e800a86f2df93da017e07380543b4060ab88c94
import torch import torch.nn as nn class Model(nn.Module): """ A network with a single convolution layer. This is used for testing flop count for convolution layers. """ def __init__(self, conv_dim: 'int', input_dim: 'int', output_dim: 'int', kernel_size: 'int', spatial_dim: 'int', stride: 'int', padding: 'int', groups_num: 'int') ->None: super().__init__() if conv_dim == 1: convLayer = nn.Conv1d elif conv_dim == 2: convLayer = nn.Conv2d else: convLayer = nn.Conv3d self.conv = convLayer(input_dim, output_dim, kernel_size, stride, padding, groups=groups_num) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'conv_dim': 4, 'input_dim': 4, 'output_dim': 4, 'kernel_size': 4, 'spatial_dim': 4, 'stride': 1, 'padding': 4, 'groups_num': 1}]
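The (1, 4, 9, 9, 9) shape asserted on buf0 above follows from the standard convolution output-size formula; a minimal sketch (the `conv_out` helper is illustrative, not from the repo):

# out = (in + 2*padding - dilation*(kernel - 1) - 1) // stride + 1
def conv_out(in_size, kernel, stride=1, padding=0, dilation=1):
    return (in_size + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1

# in=4, kernel=4, stride=1, padding=4 -> 9 per spatial dim, so the bias
# kernel's xnumel is 4 * 9 * 9 * 9 = 2916, matching the code above.
assert conv_out(4, kernel=4, stride=1, padding=4) == 9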
SiSdr
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/c5/cc545zhrt3tyhef5kaivf4deqpdpayhp73nzjodse5cfp5eoxu4z.py # Topologically Sorted Source Nodes: [add_1, a, e_true, Sss, Sss_1, add_2, e_res, Snn, Snn_1, add_3, truediv_1, log10, mul_1], Original ATen: [aten.add, aten.div, aten.mul, aten.pow, aten.sum, aten.sub, aten.log10] # Source node to ATen node mapping: # Snn => pow_2 # Snn_1 => sum_2 # Sss => pow_1 # Sss_1 => sum_1 # a => div # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # e_res => sub # e_true => mul # log10 => log10 # mul_1 => mul_1 # truediv_1 => div_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, 1.1920928955078125e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze_1, %add_1), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.1920928955078125e-07), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mul), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [-1]), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1.1920928955078125e-07), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add_3), kwargs = {}) # %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%div_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, 10), kwargs = {}) triton_poi_fused_add_div_log10_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_div_log10_mul_pow_sub_sum_0', ''' import triton import triton.language as tl 
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_log10_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_log10_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp7 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = 1.1920928955078125e-07 tmp3 = tmp1 + tmp2 tmp5 = tmp4 + tmp2 tmp6 = tmp3 / tmp5 tmp8 = tmp6 * tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp13 = tmp6 * tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp6 * tmp18 tmp20 = tmp17 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp25 = tmp6 * tmp24 tmp26 = tmp23 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp22 + tmp27 tmp29 = tmp8 * tmp8 tmp30 = tmp13 * tmp13 tmp31 = tmp29 + tmp30 tmp32 = tmp19 * tmp19 tmp33 = tmp31 + tmp32 tmp34 = tmp25 * tmp25 tmp35 = tmp33 + tmp34 tmp36 = tmp35 + tmp2 tmp37 = tmp28 + tmp2 tmp38 = tmp36 / tmp37 tmp39 = libdevice.log10(tmp38) tmp40 = 10.0 tmp41 = tmp39 * tmp40 tl.store(in_out_ptr0 + (x0), tmp41, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), 
(4, 4, 1), 0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), out=buf0) buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [add_1, a, e_true, Sss, Sss_1, add_2, e_res, Snn, Snn_1, add_3, truediv_1, log10, mul_1], Original ATen: [aten.add, aten.div, aten.mul, aten.pow, aten.sum, aten.sub, aten.log10] stream0 = get_raw_stream(0) triton_poi_fused_add_div_log10_mul_pow_sub_sum_0.run(buf3, arg0_1, buf0, buf1, arg1_1, 4, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 del buf0 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from torch import nn class SiSdr(nn.Module): def __init__(self): super().__init__() def forward(self, input: 'Tensor', target: 'Tensor'): eps = torch.finfo(input.dtype).eps Rss: 'Tensor' = torch.einsum('bi,bi->b', target, target).unsqueeze(-1) a: 'Tensor' = torch.einsum('bi,bi->b', target, input).add(eps ).unsqueeze(-1) / Rss.add(eps) e_true = a * target e_res = input - e_true Sss = e_true.square() Snn = e_res.square() Sss = Sss.sum(-1) Snn = Snn.sum(-1) return 10 * torch.log10(Sss.add(eps) / Snn.add(eps)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_log10_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp7 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 1.1920928955078125e-07 tmp3 = tmp1 + tmp2 tmp5 = tmp4 + tmp2 tmp6 = tmp3 / tmp5 tmp8 = tmp6 * tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp13 = tmp6 * tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp10 + tmp15 tmp19 = tmp6 * tmp18 tmp20 = tmp17 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp16 + tmp21 tmp25 = tmp6 * tmp24 tmp26 = tmp23 - tmp25 tmp27 = tmp26 * tmp26 tmp28 = tmp22 + tmp27 tmp29 = tmp8 * tmp8 tmp30 = tmp13 * tmp13 tmp31 = tmp29 + tmp30 tmp32 = tmp19 * tmp19 tmp33 = tmp31 + tmp32 tmp34 = tmp25 * tmp25 tmp35 = tmp33 + tmp34 tmp36 = tmp35 + tmp2 tmp37 = tmp28 + tmp2 tmp38 = tmp36 / tmp37 tmp39 = libdevice.log10(tmp38) tmp40 = 10.0 tmp41 = tmp39 * tmp40 tl.store(in_out_ptr0 + x0, tmp41, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), out=buf0) buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf1) buf2 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused_add_div_log10_mul_pow_sub_sum_0[grid(4)](buf3, arg0_1, buf0, buf1, arg1_1, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del buf0 del buf1 return buf3, class SiSdrNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
JinmingChe/DeepFilterNet
SiSdr
false
5,400
[ "ECL-2.0", "Apache-2.0", "MIT" ]
1
0e35a24c33c091b4c34afb3599f2945bf5e87adf
https://github.com/JinmingChe/DeepFilterNet/tree/0e35a24c33c091b4c34afb3599f2945bf5e87adf
import torch from torch import Tensor from torch import nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, input: 'Tensor', target: 'Tensor'): eps = torch.finfo(input.dtype).eps Rss: 'Tensor' = torch.einsum('bi,bi->b', target, target).unsqueeze(-1) a: 'Tensor' = torch.einsum('bi,bi->b', target, input).add(eps ).unsqueeze(-1) / Rss.add(eps) e_true = a * target e_res = input - e_true Sss = e_true.square() Snn = e_res.square() Sss = Sss.sum(-1) Snn = Snn.sum(-1) return 10 * torch.log10(Sss.add(eps) / Snn.add(eps)) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return []
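A quick behavioral check of the eager `Model` above (an illustrative sketch, not from the repo): the measure is scale-invariant in the estimate, up to the eps terms.

import torch

si_sdr = Model()
t = torch.rand(4, 4)
n = 0.1 * torch.rand(4, 4)
print(si_sdr(t + n, t))          # per-sample SI-SDR in dB
print(si_sdr(2.0 * (t + n), t))  # nearly identical values: scaling the input
                                 # scales e_true and e_res alike, cancelling
                                 # in the Sss/Snn ratio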
QuickGELU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vb/cvbzgl2kevabwrh6e5ay2dasnq6gi3a7zeveknxks7jkbuq64ime.py # Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # sigmoid => sigmoid # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.702), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class QuickGELU(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.702 tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class QuickGELUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Jinsu-L/KELIP
QuickGELU
false
5401
[ "Apache-2.0" ]
1
d3261cbb9ba3c3ad474dd560a5add8b69ed78477
https://github.com/Jinsu-L/KELIP/tree/d3261cbb9ba3c3ad474dd560a5add8b69ed78477
import torch from torch import nn class Model(nn.Module): def forward(self, x: 'torch.Tensor'): return x * torch.sigmoid(1.702 * x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
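The 1.702 constant above makes this the well-known sigmoid approximation of GELU; a small sketch (not from the repo) of how far it deviates from the exact erf-based GELU:

import torch
import torch.nn.functional as F

x = torch.linspace(-5, 5, steps=1001)
quick = x * torch.sigmoid(1.702 * x)  # QuickGELU, as in the module above
exact = F.gelu(x)                     # exact (erf-based) GELU
print((quick - exact).abs().max())    # small, on the order of 1e-2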
DfAlphaLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yd/cyd7tvnq3hqj42ga37n64it2zmj24f6cwrvkhr2bxyqotwqieokr.py # Topologically Sorted Source Nodes: [mul, add, clamp, sub, mul_1, square, l_off, sub_2, mul_2, add_1, clamp_1, sub_1, mul_3, abs_1, mean_1, l_on, add_2], Original ATen: [aten.mul, aten.add, aten.clamp, aten.rsub, aten.pow, aten.mean, aten.abs] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # add_1 => add_1 # add_2 => add_2 # clamp => clamp_max, clamp_min # clamp_1 => clamp_max_1, clamp_min_1 # l_off => mean # l_on => mul_4 # mean_1 => mean_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # square => pow_1 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 4.0), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sub), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -0.2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0.0), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0.0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clamp_max_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sub_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_3,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mul_4), kwargs = {}) triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0 = async_compile.triton('triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = 0.4 tmp3 = tmp1 * tmp2 tmp4 = 4.0 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 1.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tmp10 = tmp8 - tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp8 - tmp0 tmp17 = -0.2 tmp18 = tmp1 * tmp17 tmp19 = tmp18 + tmp6 tmp20 = triton_helpers.maximum(tmp19, tmp6) tmp21 = triton_helpers.minimum(tmp20, tmp8) tmp22 = tmp8 - tmp21 tmp23 = tmp16 * tmp22 tmp24 = tl_math.abs(tmp23) tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 256.0 tmp29 = tmp15 / tmp28 tmp30 = tmp27 / tmp28 tmp31 = 0.1 tmp32 = tmp30 * tmp31 tmp33 = tmp29 + tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + 
(tl.full([1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, add, clamp, sub, mul_1, square, l_off, sub_2, mul_2, add_1, clamp_1, sub_1, mul_3, abs_1, mean_1, l_on, add_2], Original ATen: [aten.mul, aten.add, aten.clamp, aten.rsub, aten.pow, aten.mean, aten.abs] stream0 = get_raw_stream(0) triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0.run(buf2, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from typing import Optional from torch import nn from typing import Final class DfAlphaLoss(nn.Module): """Add a penalty to use DF for very noisy segments. Starting from lsnr_thresh, the penalty is increased and has its maximum at lsnr_min. """ factor: 'Final[float]' lsnr_thresh: 'Final[float]' lsnr_min: 'Final[float]' def __init__(self, factor: 'float'=1, lsnr_thresh: 'float'=-7.5, lsnr_min: 'float'=-10.0): super().__init__() self.factor = factor self.lsnr_thresh = lsnr_thresh self.lsnr_min = lsnr_min def forward(self, pred_alpha: 'Tensor', target_lsnr: 'Tensor'): w = self.lsnr_mapping(target_lsnr, self.lsnr_thresh, self.lsnr_min ).view_as(pred_alpha) l_off = (pred_alpha * w).square().mean() w = self.lsnr_mapping(target_lsnr, self.lsnr_thresh + 2.5, 0.0 ).view_as(pred_alpha) l_on = 0.1 * ((1 - pred_alpha) * w).abs().mean() return l_off + l_on def lsnr_mapping(self, lsnr: 'Tensor', lsnr_thresh: 'float', lsnr_min: 'Optional[float]'=None) ->Tensor: """Map lsnr_min to 1 and lsnr_thresh to 0""" lsnr_min = float(self.lsnr_min) if lsnr_min is None else lsnr_min a_ = 1 / (lsnr_thresh - lsnr_min) b_ = -a_ * lsnr_min return 1 - torch.clamp(a_ * lsnr + b_, 0.0, 1.0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import Tensor from typing import Optional from torch import nn from typing import Final assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 0.4 tmp3 = tmp1 * tmp2 tmp4 = 4.0 tmp5 = tmp3 + tmp4 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 1.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tmp10 = tmp8 - tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp8 - tmp0 tmp17 = -0.2 tmp18 = tmp1 * tmp17 tmp19 = tmp18 + tmp6 tmp20 = triton_helpers.maximum(tmp19, tmp6) tmp21 = triton_helpers.minimum(tmp20, tmp8) tmp22 = tmp8 - tmp21 tmp23 = tmp16 * tmp22 tmp24 = tl_math.abs(tmp23) tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 256.0 tmp29 = tmp15 / tmp28 tmp30 = tmp27 / tmp28 tmp31 = 0.1 tmp32 = tmp30 * tmp31 tmp33 = tmp29 + tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_clamp_mean_mul_pow_rsub_0[grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class DfAlphaLossNew(nn.Module): """Add a penalty to use DF for very noisy segments. Starting from lsnr_thresh, the penalty is increased and has its maximum at lsnr_min. """ factor: 'Final[float]' lsnr_thresh: 'Final[float]' lsnr_min: 'Final[float]' def __init__(self, factor: 'float'=1, lsnr_thresh: 'float'=-7.5, lsnr_min: 'float'=-10.0): super().__init__() self.factor = factor self.lsnr_thresh = lsnr_thresh self.lsnr_min = lsnr_min def lsnr_mapping(self, lsnr: 'Tensor', lsnr_thresh: 'float', lsnr_min: 'Optional[float]'=None) ->Tensor: """Map lsnr_min to 1 and lsnr_thresh to 0""" lsnr_min = float(self.lsnr_min) if lsnr_min is None else lsnr_min a_ = 1 / (lsnr_thresh - lsnr_min) b_ = -a_ * lsnr_min return 1 - torch.clamp(a_ * lsnr + b_, 0.0, 1.0) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
JinmingChe/DeepFilterNet
DfAlphaLoss
false
5402
[ "ECL-2.0", "Apache-2.0", "MIT" ]
1
0e35a24c33c091b4c34afb3599f2945bf5e87adf
https://github.com/JinmingChe/DeepFilterNet/tree/0e35a24c33c091b4c34afb3599f2945bf5e87adf
import torch from torch import Tensor from typing import Optional from torch import nn from typing import Final class Model(nn.Module): """Add a penalty to use DF for very noisy segments. Starting from lsnr_thresh, the penalty is increased and has its maximum at lsnr_min. """ factor: 'Final[float]' lsnr_thresh: 'Final[float]' lsnr_min: 'Final[float]' def __init__(self, factor: 'float'=1, lsnr_thresh: 'float'=-7.5, lsnr_min: 'float'=-10.0): super().__init__() self.factor = factor self.lsnr_thresh = lsnr_thresh self.lsnr_min = lsnr_min def forward(self, pred_alpha: 'Tensor', target_lsnr: 'Tensor'): w = self.lsnr_mapping(target_lsnr, self.lsnr_thresh, self.lsnr_min ).view_as(pred_alpha) l_off = (pred_alpha * w).square().mean() w = self.lsnr_mapping(target_lsnr, self.lsnr_thresh + 2.5, 0.0 ).view_as(pred_alpha) l_on = 0.1 * ((1 - pred_alpha) * w).abs().mean() return l_off + l_on def lsnr_mapping(self, lsnr: 'Tensor', lsnr_thresh: 'float', lsnr_min: 'Optional[float]'=None) ->Tensor: """Map lsnr_min to 1 and lsnr_thresh to 0""" lsnr_min = float(self.lsnr_min) if lsnr_min is None else lsnr_min a_ = 1 / (lsnr_thresh - lsnr_min) b_ = -a_ * lsnr_min return 1 - torch.clamp(a_ * lsnr + b_, 0.0, 1.0) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
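The literals 0.4/4.0 and -0.2/0.0 in the fused kernel above are exactly the a_/b_ coefficients that `lsnr_mapping` computes for the two weight terms. A minimal sketch (illustrative probe values, not from the repo) of the first mapping:

import torch

m = Model()  # defaults: lsnr_thresh=-7.5, lsnr_min=-10.0
lsnr = torch.tensor([-12.0, -10.0, -8.75, -7.5, 0.0])
# 1 at lsnr_min and below, 0 at lsnr_thresh and above, linear in between:
print(m.lsnr_mapping(lsnr, m.lsnr_thresh, m.lsnr_min))
# tensor([1.0000, 1.0000, 0.5000, 0.0000, 0.0000])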
LogsticRegression
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/z5/cz5bgdo2gmhnnmtf6w7lrjkvliacxo7nomq7mbmjquxqyxqgt5bj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 
tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 128, grid=grid(128), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), 
(4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0) del buf3 return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), buf4, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class LogsticRegression(nn.Module): def __init__(self, in_dim, n_class): super().__init__() self.fc1 = nn.Linear(in_dim, in_dim // 2) self.fc2 = nn.Linear(in_dim // 2, n_class) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'n_class': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1, primals_2, buf5, 128, XBLOCK=128, 
num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), ( 2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 2), (2, 1), 0), buf4, primals_4, buf5 class LogsticRegressionNew(nn.Module): def __init__(self, in_dim, n_class): super().__init__() self.fc1 = nn.Linear(in_dim, in_dim // 2) self.fc2 = nn.Linear(in_dim // 2, n_class) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Jie-Yuan/Torchappy
LogsticRegression
false
5,403
[ "Apache-2.0" ]
1
e722db1085fa2ff8e0267f7e6745875531c00f8b
https://github.com/Jie-Yuan/Torchappy/tree/e722db1085fa2ff8e0267f7e6745875531c00f8b
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, in_dim, n_class): super().__init__() self.fc1 = nn.Linear(in_dim, in_dim // 2) self.fc2 = nn.Linear(in_dim // 2, n_class) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
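A minimal sketch of what the two fused kernels in this record compute together (the two-pass split is read off the kernel bodies above; the check itself is illustrative and not part of the dataset): triton_poi_fused__softmax_1 subtracts the per-position max over dim 1 and exponentiates, and triton_poi_fused__softmax_2 normalizes by the per-position sum, which is exactly a numerically stable softmax.

import torch

x = torch.rand(4, 4, 4, 4)

# pass 1 (_softmax_1): subtract the max over dim 1, then exponentiate
exp = torch.exp(x - x.max(dim=1, keepdim=True).values)
# pass 2 (_softmax_2): divide by the sum over dim 1
out = exp / exp.sum(dim=1, keepdim=True)

assert torch.allclose(out, torch.softmax(x, dim=1))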
ThreeNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xi/cxinm2diazvaifolgj5mdlirdhrupsnijqxfyqwvdbrzs4ur6dtz.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.mean] # Source node to ATen node mapping: # x => convolution # x_1 => mean # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%convolution, [-1, -2], True), kwargs = {}) triton_per_fused_convolution_mean_0 = async_compile.triton('triton_per_fused_convolution_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): 
xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_convolution_mean_0.run(buf2, buf0, primals_2, 16, 16, grid=grid(16), stream=stream0) del buf0 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 return (buf5, primals_1, primals_3, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), buf3, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', 
benchmark_compiled_module)
import torch import torch.nn as nn class ThreeNet(nn.Module): """ A network with three layers. This is used for testing a network with more than one operation. The network has a convolution layer followed by two fully connected layers. """ def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int' ) ->None: super(ThreeNet, self).__init__() self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1) out_dim = 1 self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim)) self.linear1 = nn.Linear(conv_dim, linear_dim) self.linear2 = nn.Linear(linear_dim, 1) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv(x) x = self.pool(x) x = torch.flatten(x, 1) x = self.linear1(x) x = self.linear2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'conv_dim': 4, 'linear_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_convolution_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf2 = buf1 del buf1 get_raw_stream(0) triton_per_fused_convolution_mean_0[grid(16)](buf2, buf0, primals_2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf0 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf3) del primals_5 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 return buf5, primals_1, primals_3, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), buf3, primals_6, primals_4 class ThreeNetNew(nn.Module): """ A network with three layers. This is used for testing a network with more than one operation. The network has a convolution layer followed by two fully connected layers. """ def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int' ) ->None: super(ThreeNetNew, self).__init__() self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1) out_dim = 1 self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim)) self.linear1 = nn.Linear(conv_dim, linear_dim) self.linear2 = nn.Linear(linear_dim, 1) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.linear1.weight primals_5 = self.linear1.bias primals_6 = self.linear2.weight primals_7 = self.linear2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Jennifer-Rigdon/fvcore
ThreeNet
false
5,404
[ "Apache-2.0" ]
1
7e800a86f2df93da017e07380543b4060ab88c94
https://github.com/Jennifer-Rigdon/fvcore/tree/7e800a86f2df93da017e07380543b4060ab88c94
import torch import torch.nn as nn class Model(nn.Module): """ A network with three layers. This is used for testing a network with more than one operation. The network has a convolution layer followed by two fully connected layers. """ def __init__(self, input_dim: 'int', conv_dim: 'int', linear_dim: 'int' ) ->None: super().__init__() self.conv = nn.Conv2d(input_dim, conv_dim, 1, 1) out_dim = 1 self.pool = nn.AdaptiveAvgPool2d((out_dim, out_dim)) self.linear1 = nn.Linear(conv_dim, linear_dim) self.linear2 = nn.Linear(linear_dim, 1) def forward(self, x: 'torch.Tensor') ->torch.Tensor: x = self.conv(x) x = self.pool(x) x = torch.flatten(x, 1) x = self.linear1(x) x = self.linear2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
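An illustrative check (assumption: inferred from triton_per_fused_convolution_mean_0 above, which folds the conv bias add and the pooling into one mean over rnumel=16 elements): for a 4x4 feature map, AdaptiveAvgPool2d((1, 1)) reduces to a plain mean over the spatial dims.

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4)
pooled = nn.AdaptiveAvgPool2d((1, 1))(x)
# the fused kernel computes exactly this mean (after adding the conv bias)
assert torch.allclose(pooled, x.mean(dim=(-1, -2), keepdim=True))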
FeatureL2Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/t6/ct6zrt2qja6lep62npxlgnz35a6eevas3jybmmdtt25zb7pb54hx.py # Topologically Sorted Source Nodes: [div], Original ATen: [aten.div] # Source node to ATen node mapping: # div => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = 
tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [div], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn class FeatureL2Norm(nn.Module): """ Implementation by Ignacio Rocco paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch """ def __init__(self): super(FeatureL2Norm, self).__init__() def forward(self, feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class FeatureL2NormNew(nn.Module): """ Implementation by Ignacio Rocco paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch """ def __init__(self): super(FeatureL2NormNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
FeatureL2Norm
false
5,405
[ "MIT" ]
1
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
https://github.com/JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching/tree/b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
import torch import torch.nn as nn import torch.nn class Model(nn.Module): """ Implementation by Ignacio Rocco paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch """ def __init__(self): super().__init__() def forward(self, feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
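A quick equivalence sketch (illustrative, not from the repo): the fused div kernel computes x / sqrt(sum(x**2, dim=1) + 1e-6), so the epsilon sits under the square root rather than clamping the norm the way torch.nn.functional.normalize does; the two therefore agree only up to a small tolerance.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# the kernel's math: per-position L2 norm over the channel dim, eps inside sqrt
out = x / torch.sqrt(x.pow(2).sum(dim=1, keepdim=True) + 1e-06)

ref = F.normalize(x, p=2, dim=1)  # clamps the norm at eps instead
assert torch.allclose(out, ref, atol=1e-3)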
Flip
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mw/cmwuhpoo35erq3s5jprdn2bal2k4sfcwtsv6hgm3szdtd6g2t2ew.py # Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip] # Source node to ATen node mapping: # flip => rev # Graph fragment: # %rev : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%arg0_1, [3]), kwargs = {}) triton_poi_fused_flip_0 = async_compile.triton('triton_poi_fused_flip_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_flip_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (3 + ((-1)*x0) + (4*x1)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [flip], Original ATen: [aten.flip] stream0 = get_raw_stream(0) triton_poi_fused_flip_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init class Flip(nn.Module): """Does horizontal or vertical flip on a BCHW tensor. Args: horizontal (bool): If True, applies horizontal flip. Else, vertical flip is applied. Default = True ** Not recommended for CPU (Pillow/OpenCV based functions are faster). """ def __init__(self, horizontal: 'bool'=True): super(Flip, self).__init__() self.horizontal = horizontal def forward(self, tensor: 'torch.Tensor'): return tensor.flip(3 if self.horizontal else 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (3 + -1 * x0 + 4 * x1), xmask, eviction_policy ='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_flip_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class FlipNew(nn.Module): """Does horizontal or vertical flip on a BCHW tensor. Args: horizontal (bool): If True, applies horizontal flip. Else, vertical flip is applied. Default = True ** Not recommended for CPU (Pillow/OpenCV based functions are faster). """ def __init__(self, horizontal: 'bool'=True): super(FlipNew, self).__init__() self.horizontal = horizontal def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Johnson-yue/TensorMONK
Flip
false
5,406
[ "MIT" ]
1
1785132b82c685c3b3fc05b00dec46b1fccfc948
https://github.com/Johnson-yue/TensorMONK/tree/1785132b82c685c3b3fc05b00dec46b1fccfc948
import torch import torch.nn as nn import torch.nn.init class Model(nn.Module): """Does horizontal or vertical flip on a BCHW tensor. Args: horizontal (bool): If True, applies horizontal flip. Else, vertical flip is applied. Default = True ** Not recommended for CPU (Pillow/OpenCV based functions are faster). """ def __init__(self, horizontal: 'bool'=True): super().__init__() self.horizontal = horizontal def forward(self, tensor: 'torch.Tensor'): return tensor.flip(3 if self.horizontal else 2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
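An index-arithmetic sketch (derived from the addressing in triton_poi_fused_flip_0, which loads 3 - x0 + 4 * x1 with x0 the last-dim index): flipping the last dim of a contiguous (4, 4, 4, 4) tensor just reads element 3 - w within each row.

import torch

x = torch.rand(4, 4, 4, 4)
idx = torch.arange(3, -1, -1)  # [3, 2, 1, 0], the reversed last-dim index
assert torch.equal(x.flip(3), x[..., idx])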
CorrelationVolume
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = 
xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [feature_mul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), out=buf1) del arg1_1 del buf0 return (reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 1, 64, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn class CorrelationVolume(nn.Module): """ Implementation by Ignacio Rocco paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch """ def __init__(self): super(CorrelationVolume, self).__init__() def forward(self, feature_A, feature_B): b, c, h, w = feature_A.size() feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) feature_B = feature_B.view(b, c, h * w).transpose(1, 2) feature_mul = torch.bmm(feature_B, feature_A) correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3 ).transpose(1, 2) return correlation_tensor def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), out=buf1) del arg1_1 del buf0 return reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 1, 64, 16), 0), class CorrelationVolumeNew(nn.Module): """ Implementation by Ignacio Rocco paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch """ def __init__(self): super(CorrelationVolumeNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
CorrelationVolume
false
5,407
[ "MIT" ]
1
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
https://github.com/JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching/tree/b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
import torch import torch.nn as nn import torch.nn class Model(nn.Module): """ Implementation by Ignacio Rocco paper: https://arxiv.org/abs/1703.05593 project: https://github.com/ignacio-rocco/cnngeometric_pytorch """ def __init__(self): super().__init__() def forward(self, feature_A, feature_B): b, c, h, w = feature_A.size() feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w) feature_B = feature_B.view(b, c, h * w).transpose(1, 2) feature_mul = torch.bmm(feature_B, feature_A) correlation_tensor = feature_mul.view(b, h, w, h * w).transpose(2, 3 ).transpose(1, 2) return correlation_tensor def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
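A shape-and-semantics sketch (illustrative only, not part of the record): the correlation volume is a per-pixel dot product over channels between feature_B and every flattened position of feature_A, which einsum states directly; the bmm/view/transpose chain in the module is an equivalent layout-conscious spelling of the same contraction.

import torch

b, c, h, w = 4, 4, 4, 4
fA = torch.rand(b, c, h, w)
fB = torch.rand(b, c, h, w)

# flatten fA positions in (w, h) order, exactly as the module does
A = fA.transpose(2, 3).contiguous().view(b, c, h * w)
B = fB.view(b, c, h * w).transpose(1, 2)
corr = torch.bmm(B, A).view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)

# output channel k = dot product over c of fB with flattened fA position k
ref = torch.einsum('bcyx,bck->bkyx', fB, A)
assert torch.allclose(corr, ref)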
FeatureCorrelation
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = 
xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sy/csyhswlvyuo2qzwowqiku2qw4gif3we4ukgp4arsneopazxj4fpo.py # Topologically Sorted Source Nodes: [relu, pow_1, sum_1, correlation_tensor_1], Original ATen: [aten.relu, aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # correlation_tensor_1 => div # pow_1 => pow_1 # relu => relu # sum_1 => sum_1 # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_3,), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %expand), kwargs = {}) triton_per_fused_div_pow_relu_sum_1 = async_compile.triton('triton_per_fused_div_pow_relu_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_relu_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_pow_relu_sum_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tmp11 = tmp2 / tmp10 tl.store(out_ptr1 + (r1 + (16*x0)), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 
4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [feature_mul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), out=buf1) del arg1_1 del buf0 buf3 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch.float32) # Topologically Sorted Source Nodes: [relu, pow_1, sum_1, correlation_tensor_1], Original ATen: [aten.relu, aten.pow, aten.sum, aten.div] triton_per_fused_div_pow_relu_sum_1.run(buf1, buf3, 64, 16, grid=grid(64), stream=stream0) del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn def featureL2Norm(feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) class FeatureCorrelation(torch.nn.Module): def __init__(self, shape='3D', normalization=True): super(FeatureCorrelation, self).__init__() self.normalization = normalization self.shape = shape self.ReLU = nn.ReLU() def forward(self, feature_A, feature_B): if self.shape == '3D': b, c, h, w = feature_A.size() feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w ) feature_B = feature_B.view(b, c, h * w).transpose(1, 2) feature_mul = torch.bmm(feature_B, feature_A) correlation_tensor = feature_mul.view(b, h, w, h * w).transpose( 2, 3).transpose(1, 2) elif self.shape == '4D': b, c, hA, wA = feature_A.size() b, c, hB, wB = feature_B.size() feature_A = feature_A.view(b, c, hA * wA).transpose(1, 2) feature_B = feature_B.view(b, c, hB * wB) feature_mul = torch.bmm(feature_A, feature_B) correlation_tensor = feature_mul.view(b, hA, wA, hB, wB).unsqueeze( 1) if self.normalization: correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor)) return correlation_tensor def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_per_fused_div_pow_relu_sum_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tmp11 = tmp2 / tmp10 tl.store(out_ptr1 + (r1 + 16 * x0), tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0), out=buf1) del arg1_1 del buf0 buf3 = empty_strided_cuda((4, 16, 4, 4), (256, 1, 64, 16), torch. float32) triton_per_fused_div_pow_relu_sum_1[grid(64)](buf1, buf3, 64, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf1 return buf3, def featureL2Norm(feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) class FeatureCorrelationNew(torch.nn.Module): def __init__(self, shape='3D', normalization=True): super(FeatureCorrelationNew, self).__init__() self.normalization = normalization self.shape = shape self.ReLU = nn.ReLU() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
FeatureCorrelation
false
5,408
[ "MIT" ]
1
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
https://github.com/JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching/tree/b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
import torch import torch.nn as nn import torch.nn def featureL2Norm(feature): epsilon = 1e-06 norm = torch.pow(torch.sum(torch.pow(feature, 2), 1) + epsilon, 0.5 ).unsqueeze(1).expand_as(feature) return torch.div(feature, norm) class Model(torch.nn.Module): def __init__(self, shape='3D', normalization=True): super().__init__() self.normalization = normalization self.shape = shape self.ReLU = nn.ReLU() def forward(self, feature_A, feature_B): if self.shape == '3D': b, c, h, w = feature_A.size() feature_A = feature_A.transpose(2, 3).contiguous().view(b, c, h * w ) feature_B = feature_B.view(b, c, h * w).transpose(1, 2) feature_mul = torch.bmm(feature_B, feature_A) correlation_tensor = feature_mul.view(b, h, w, h * w).transpose( 2, 3).transpose(1, 2) elif self.shape == '4D': b, c, hA, wA = feature_A.size() b, c, hB, wB = feature_B.size() feature_A = feature_A.view(b, c, hA * wA).transpose(1, 2) feature_B = feature_B.view(b, c, hB * wB) feature_mul = torch.bmm(feature_A, feature_B) correlation_tensor = feature_mul.view(b, hA, wA, hB, wB).unsqueeze( 1) if self.normalization: correlation_tensor = featureL2Norm(self.ReLU(correlation_tensor)) return correlation_tensor def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
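A minimal eager sketch of the '3D' correlation path that this record's fused kernel implements, for the (4, 4, 4, 4) inputs from get_inputs(); the name correlation_3d and the standalone layout are illustrative, not part of the repo. Note the epsilon sits inside the square root, matching sqrt(tmp7 + 1e-06) in triton_per_fused_div_pow_relu_sum_1.

import torch


def correlation_3d(feature_A, feature_B):
    # mirror of the record's forward for shape='3D' with normalization=True
    b, c, h, w = feature_A.shape
    fA = feature_A.transpose(2, 3).contiguous().view(b, c, h * w)
    fB = feature_B.view(b, c, h * w).transpose(1, 2)
    corr = torch.bmm(fB, fA).view(b, h, w, h * w).transpose(2, 3).transpose(1, 2)
    corr = torch.relu(corr)
    # channel-wise L2 norm with eps inside the sqrt, as in featureL2Norm
    norm = torch.sqrt(torch.sum(corr ** 2, dim=1, keepdim=True) + 1e-06)
    return corr / norm


out = correlation_3d(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
print(out.shape)  # torch.Size([4, 16, 4, 4]), the shape returned by call()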
GaussianFocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mc/cmcjnrkcniypkwkusrjjxvk7xd3n4rxefn5nz4mqn47gca6icbsg.py # Topologically Sorted Source Nodes: [add, log, neg, sub_1, pow_2, mul, pos_weights, pos_loss, sub_2, add_1, log_1, neg_1, pow_3, mul_2, sub, neg_weights, neg_loss, loss, loss_1, loss_reg], Original ATen: [aten.add, aten.log, aten.neg, aten.rsub, aten.pow, aten.mul, aten.eq, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # log => log # log_1 => log_1 # loss => add_2 # loss_1 => mean # loss_reg => mul_4 # mul => mul # mul_2 => mul_2 # neg => neg # neg_1 => neg_1 # neg_loss => mul_3 # neg_weights => pow_1 # pos_loss => mul_1 # pos_weights => eq # pow_2 => pow_2 # pow_3 => pow_3 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-12), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %pow_2), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %eq), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-12), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log_1,), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %pow_3), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {}) triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr1 + (r0), None) tmp1 = 1e-12 tmp2 = tmp0 + tmp1 tmp3 = tl_math.log(tmp2) tmp4 = -tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp0 tmp7 = tmp6 * tmp6 tmp8 = tmp4 * tmp7 tmp10 = tmp9 == tmp5 tmp11 = tmp10.to(tl.float32) tmp12 = tmp8 * tmp11 tmp13 = tmp6 + tmp1 tmp14 = tl_math.log(tmp13) tmp15 = -tmp14 tmp16 = tmp0 * tmp0 tmp17 = tmp15 * tmp16 tmp18 = tmp5 - tmp9 tmp19 = tmp18 * tmp18 tmp20 = tmp19 * tmp19 tmp21 = tmp17 * tmp20 tmp22 = tmp12 + tmp21 tmp23 = tl.broadcast_to(tmp22, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = 256.0 tmp27 = tmp25 / tmp26 tmp28 = tmp27 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, log, neg, sub_1, pow_2, mul, pos_weights, pos_loss, sub_2, add_1, log_1, neg_1, pow_3, mul_2, sub, neg_weights, neg_loss, loss, loss_1, loss_reg], Original ATen: [aten.add, aten.log, aten.neg, aten.rsub, aten.pow, aten.mul, aten.eq, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
            **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLoss(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
            loss_weight=1.0):
        super(GaussianFocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None,
            reduction_override=None):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction
                in gaussian distribution.
            weight (torch.Tensor, optional): The weight of loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Defaults to None.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (reduction_override if reduction_override else self.
            reduction)
        loss_reg = self.loss_weight * gaussian_focal_loss(pred, target,
            weight, alpha=self.alpha, gamma=self.gamma, reduction=reduction,
            avg_factor=avg_factor)
        return loss_reg


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0(in_out_ptr0,
        in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1e-12
    tmp2 = tmp0 + tmp1
    tmp3 = tl_math.log(tmp2)
    tmp4 = -tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp0
    tmp7 = tmp6 * tmp6
    tmp8 = tmp4 * tmp7
    tmp10 = tmp9 == tmp5
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp8 * tmp11
    tmp13 = tmp6 + tmp1
    tmp14 = tl_math.log(tmp13)
    tmp15 = -tmp14
    tmp16 = tmp0 * tmp0
    tmp17 = tmp15 * tmp16
    tmp18 = tmp5 - tmp9
    tmp19 = tmp18 * tmp18
    tmp20 = tmp19 * tmp19
    tmp21 = tmp17 * tmp20
    tmp22 = tmp12 + tmp21
    tmp23 = tl.broadcast_to(tmp22, [RBLOCK])
    tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0))
    tmp26 = 256.0
    tmp27 = tmp25 / tmp26
    tmp28 = tmp27 * tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_eq_log_mean_mul_neg_pow_rsub_0[grid(1)](buf1,
            arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
            **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class GaussianFocalLossNew(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for negative samples.
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Loss weight of current loss.
    """

    def __init__(self, alpha=2.0, gamma=4.0, reduction='mean',
            loss_weight=1.0):
        super(GaussianFocalLossNew, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
JunHyungKang/SAROD_ICIP
GaussianFocalLoss
false
5,409
[ "MIT" ]
1
71585951f64dc1cc22ed72900eff81f747edec77
https://github.com/JunHyungKang/SAROD_ICIP/tree/71585951f64dc1cc22ed72900eff81f747edec77
import functools
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', avg_factor=None,
            **kwargs):
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper


@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in
    gaussian distribution.

    Args:
        pred (torch.Tensor): The prediction.
        gaussian_target (torch.Tensor): The learning target of the prediction
            in gaussian distribution.
        alpha (float, optional): A balanced form for Focal Loss.
            Defaults to 2.0.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 4.0.
    """
    eps = 1e-12
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss


class Model(nn.Module):
    """GaussianFocalLoss is a variant of focal loss.

    More details can be found in the `paper
    <https://arxiv.org/abs/1808.01244>`_
    Code is modified from `kp_utils.py
    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
    not 0/1 binary target.

    Args:
        alpha (float): Power of prediction.
        gamma (float): Power of target for n
# ... truncated (>4000 chars) for memory efficiency
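A quick hand-check of the element-wise Gaussian focal loss defined in this record, with the default alpha=2.0 and gamma=4.0 inlined; the two sample points are illustrative, and the formula is written out rather than imported from the record.

import torch

pred = torch.tensor([0.9, 0.1])
target = torch.tensor([1.0, 0.0])  # a heatmap peak and pure background
eps = 1e-12
pos = -(pred + eps).log() * (1 - pred) ** 2 * target.eq(1)
neg = -(1 - pred + eps).log() * pred ** 2 * (1 - target) ** 4
print(pos + neg)  # both elements ~1e-3: confident peak, confident background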
PCENlr
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yd/cydbtjoq352gcolmflbvu2nqkda7xg7q5hnvltb47jsg5dbmubym.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] # Source node to ATen node mapping: # linear => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = 
yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qu/cqukmrxa6etdpgvb2u23hdcpcjzslz6rrkiupkx3762mhtweeoww.py # Topologically Sorted Source Nodes: [alpha, delta, rho, add, m, add_1, pow_1, truediv, add_2, pow_2, pow_3, pcen_out], Original ATen: [aten.exp, aten.add, aten.relu, aten.pow, aten.div, aten.sub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # alpha => exp # delta => exp_1 # m => relu # pcen_out => sub # pow_1 => pow_1 # pow_2 => pow_2 # pow_3 => pow_3 # rho => exp_2 # truediv => div # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%expand_1,), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%expand_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_3, %primals_2), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, 0.1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%add_1, %exp), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %exp_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%add_2, %exp_2), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%exp_1, %exp_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %pow_3), kwargs = {}) triton_poi_fused_add_div_exp_pow_relu_sub_1 = async_compile.triton('triton_poi_fused_add_div_exp_pow_relu_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_pow_relu_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_exp_pow_relu_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.1 tmp6 = tmp4 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = libdevice.pow(tmp6, tmp8) tmp10 = tmp0 / tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = libdevice.pow(tmp13, tmp15) tmp17 = libdevice.pow(tmp12, tmp15) tmp18 = tmp16 - tmp17 tl.store(out_ptr0 + (x2 + (4*y3)), tmp18, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_2, buf0, 16, 4, grid=grid(16, 4), stream=stream0) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(buf1, reinterpret_tensor(primals_6, (1, 4), (1, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [alpha, delta, rho, add, m, add_1, pow_1, truediv, add_2, pow_2, pow_3, pcen_out], Original ATen: [aten.exp, aten.add, aten.relu, aten.pow, aten.div, aten.sub] triton_poi_fused_add_div_exp_pow_relu_sub_1.run(primals_2, buf2, primals_1, primals_3, primals_4, buf3, 16, 4, grid=grid(16, 4), stream=stream0) return (buf3, primals_1, primals_2, primals_3, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, buf2, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), 
device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PCENlr(nn.Module):
    """
    A Low-rank version for per-channel energy normalization.
    """

    def __init__(self, N, T):
        super(PCENlr, self).__init__()
        self.N = N
        self.T = T
        self.lr_enc = nn.Linear(self.T, 1, bias=False)
        self.lr_dec = nn.Linear(1, self.T, bias=False)
        self.log_alpha = nn.Parameter((torch.randn(self.N) * 0.1 + 1.0).log_())
        self.log_delta = nn.Parameter((torch.randn(self.N) * 0.1 + 2.0).log_())
        self.log_rho = nn.Parameter((torch.randn(self.N) * 0.1 + 0.6).log_())
        self.relu = nn.ReLU()
        self.eps = 0.1
        self.initialize_parameters()

    def initialize_parameters(self):
        torch.nn.init.xavier_normal_(self.lr_enc.weight)

    def forward(self, x):
        alpha = self.log_alpha.expand_as(x).exp()
        delta = self.log_delta.expand_as(x).exp()
        rho = self.log_rho.expand_as(x).exp()
        m = self.relu(self.lr_dec(self.lr_enc(x.permute(0, 2, 1))).permute(
            0, 2, 1) + x)
        pcen_out = (x / (m + self.eps) ** alpha + delta) ** rho - delta ** rho
        return pcen_out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'N': 4, 'T': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_div_exp_pow_relu_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.1 tmp6 = tmp4 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = libdevice.pow(tmp6, tmp8) tmp10 = tmp0 / tmp9 tmp12 = tl_math.exp(tmp11) tmp13 = tmp10 + tmp12 tmp15 = tl_math.exp(tmp14) tmp16 = libdevice.pow(tmp13, tmp15) tmp17 = libdevice.pow(tmp12, tmp15) tmp18 = tmp16 - tmp17 tl.store(out_ptr0 + (x2 + 4 * y3), tmp18, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](primals_2, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_6, (1, 4), (1, 1 ), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_exp_pow_relu_sub_1[grid(16, 4)](primals_2, buf2, primals_1, 
primals_3, primals_4, buf3, 16, 4, XBLOCK=4, YBLOCK=8,
            num_warps=1, num_stages=1)
    return (buf3, primals_1, primals_2, primals_3, primals_4,
        reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, buf2, primals_6)


class PCENlrNew(nn.Module):
    """
    A Low-rank version for per-channel energy normalization.
    """

    def __init__(self, N, T):
        super(PCENlrNew, self).__init__()
        self.N = N
        self.T = T
        self.lr_enc = nn.Linear(self.T, 1, bias=False)
        self.lr_dec = nn.Linear(1, self.T, bias=False)
        self.log_alpha = nn.Parameter((torch.randn(self.N) * 0.1 + 1.0).log_())
        self.log_delta = nn.Parameter((torch.randn(self.N) * 0.1 + 2.0).log_())
        self.log_rho = nn.Parameter((torch.randn(self.N) * 0.1 + 0.6).log_())
        self.relu = nn.ReLU()
        self.eps = 0.1
        self.initialize_parameters()

    def initialize_parameters(self):
        torch.nn.init.xavier_normal_(self.lr_enc.weight)

    def forward(self, input_0):
        primals_1 = self.log_alpha
        primals_3 = self.log_delta
        primals_4 = self.log_rho
        primals_5 = self.lr_enc.weight
        primals_6 = self.lr_dec.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
Js-Mim/wagner_vad
PCENlr
false
5,410
[ "MIT" ]
1
cc682bd7a8f496a26fe4be39ea2b2d68e493c5ba
https://github.com/Js-Mim/wagner_vad/tree/cc682bd7a8f496a26fe4be39ea2b2d68e493c5ba
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    A Low-rank version for per-channel energy normalization.
    """

    def __init__(self, N, T):
        super().__init__()
        self.N = N
        self.T = T
        self.lr_enc = nn.Linear(self.T, 1, bias=False)
        self.lr_dec = nn.Linear(1, self.T, bias=False)
        self.log_alpha = nn.Parameter((torch.randn(self.N) * 0.1 + 1.0).log_())
        self.log_delta = nn.Parameter((torch.randn(self.N) * 0.1 + 2.0).log_())
        self.log_rho = nn.Parameter((torch.randn(self.N) * 0.1 + 0.6).log_())
        self.relu = nn.ReLU()
        self.eps = 0.1
        self.initialize_parameters()

    def initialize_parameters(self):
        torch.nn.init.xavier_normal_(self.lr_enc.weight)

    def forward(self, x):
        alpha = self.log_alpha.expand_as(x).exp()
        delta = self.log_delta.expand_as(x).exp()
        rho = self.log_rho.expand_as(x).exp()
        m = self.relu(self.lr_dec(self.lr_enc(x.permute(0, 2, 1))).permute(
            0, 2, 1) + x)
        pcen_out = (x / (m + self.eps) ** alpha + delta) ** rho - delta ** rho
        return pcen_out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4, 4]
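A sketch of the per-channel energy normalization arithmetic that this record's fused kernel evaluates, with fixed illustrative exponents in place of the learned log-parameters and a ReLU stand-in for the low-rank smoother lr_dec(lr_enc(.)); everything here is illustrative, not repo code. Note that expand_as on a (4,)-vector broadcasts along the last (time) axis, exactly as log_alpha.expand_as(x) does in the module.

import torch

x = torch.rand(4, 4, 4)
m = torch.relu(x)  # stand-in for the module's smoothed signal, kept non-negative
alpha = torch.zeros(4).expand_as(x).exp()               # exp(0) = 1.0
delta = torch.full((4,), 2.0).log().expand_as(x).exp()  # 2.0
rho = torch.full((4,), 0.5).log().expand_as(x).exp()    # 0.5
eps = 0.1
pcen = (x / (m + eps) ** alpha + delta) ** rho - delta ** rho
print(pcen.shape)  # torch.Size([4, 4, 4])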
ConvHeadPooling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/6r/c6rkywilfuy64m6cuftgkvjhytprq2kemqwqphmypsicig6wdmin.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 4), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 9) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 144, grid=grid(144), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cls_token], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 return (buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class ConvHeadPooling(nn.Module): """Adapted from https://github.com/naver-ai/pit/blob/9d97a62e6a2a72a86685003998fcae700f952e18/pit.py#L54-L69 """ def __init__(self, in_feature: 'int', out_feature: 'int', stride: 'int'): super(ConvHeadPooling, self).__init__() self.conv = nn.Conv2d(in_channels=in_feature, out_channels= out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, groups=in_feature) self.fc = nn.Linear(in_feature, out_feature) def forward(self, x, cls_token): x = self.conv(x) cls_token = self.fc(cls_token) return x, cls_token def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 9 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(144)](buf1, primals_2, 144, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 return buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0) class ConvHeadPoolingNew(nn.Module): """Adapted from https://github.com/naver-ai/pit/blob/9d97a62e6a2a72a86685003998fcae700f952e18/pit.py#L54-L69 """ def __init__(self, in_feature: 'int', out_feature: 'int', stride: 'int'): super(ConvHeadPoolingNew, self).__init__() self.conv = nn.Conv2d(in_channels=in_feature, out_channels= out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, groups=in_feature) self.fc = nn.Linear(in_feature, out_feature) def forward(self, input_0, input_1): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.fc.weight primals_5 = self.fc.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
Justin900429/vision-transformer
ConvHeadPooling
false
5,411
[ "MIT" ]
1
e149092efbb83c166449944137db0ee5200f9325
https://github.com/Justin900429/vision-transformer/tree/e149092efbb83c166449944137db0ee5200f9325
import torch import torch.nn as nn class Model(nn.Module): """Adapted from https://github.com/naver-ai/pit/blob/9d97a62e6a2a72a86685003998fcae700f952e18/pit.py#L54-L69 """ def __init__(self, in_feature: 'int', out_feature: 'int', stride: 'int'): super().__init__() self.conv = nn.Conv2d(in_channels=in_feature, out_channels= out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, groups=in_feature) self.fc = nn.Linear(in_feature, out_feature) def forward(self, x, cls_token): x = self.conv(x) cls_token = self.fc(cls_token) return x, cls_token def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 1]
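A shape check for the head-pooling pair above, assuming this record's init args in_feature=4, out_feature=4, stride=1 (so kernel_size=2, padding=0); a standalone sketch, not repo code.

import torch
import torch.nn as nn

conv = nn.Conv2d(4, 4, kernel_size=2, stride=1, padding=0, groups=4)
fc = nn.Linear(4, 4)
x, cls_token = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(conv(x).shape)        # torch.Size([4, 4, 3, 3]): (4 - 2)//1 + 1 = 3 per side
print(fc(cls_token).shape)  # torch.Size([4, 4, 4, 4])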
AffineGridGen
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ps/cpsodef265fez7yctn2j3nynklbbmfuxn7bi57zsgzlanh73sk5f.py # Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator] # Source node to ATen node mapping: # affine_grid => add_3, constant_pad_nd_2, full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %constant_pad_nd_2 : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%full_default, [2, 0], 0.0), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %constant_pad_nd_2), kwargs = {}) triton_poi_fused_affine_grid_generator_0 = async_compile.triton('triton_poi_fused_affine_grid_generator_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_affine_grid_generator_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_affine_grid_generator_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 172800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 3 x1 = (xindex // 3) % 240 x2 = (xindex // 720) x5 = xindex tmp0 = x0 tmp1 = tl.full([1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = x1 tmp4 = tmp3.to(tl.float32) tmp5 = 120.0 tmp6 = tmp4 < tmp5 tmp7 = 0.008333333333333333 tmp8 = tmp4 * tmp7 tmp9 = -0.9958333333333333 tmp10 = tmp8 + tmp9 tmp11 = 239 + ((-1)*x1) tmp12 = tmp11.to(tl.float32) tmp13 = tmp12 * tmp7 tmp14 = 0.9958333333333333 tmp15 = tmp14 - tmp13 tmp16 = tl.where(tmp6, tmp10, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp19 = (-1) + x0 tmp20 = tl.full([1], 0, tl.int64) tmp21 = tmp19 >= tmp20 tmp22 = tmp19 < tmp1 tmp23 = tmp21 & tmp22 tmp24 = x2 tmp25 = tmp24.to(tl.float32) tmp26 = tmp25 < tmp5 tmp27 = tmp25 * tmp7 tmp28 = tmp27 + tmp9 tmp29 = 239 + ((-1)*x2) tmp30 = tmp29.to(tl.float32) tmp31 = tmp30 * tmp7 tmp32 = tmp14 - tmp31 tmp33 = tl.where(tmp26, tmp28, tmp32) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp23, tmp33, tmp34) tmp36 = tmp18 + tmp35 tmp37 = (-2) + x0 tmp38 = tmp37 >= tmp20 tmp39 = 1.0 tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype) tmp41 = tl.where(tmp38, tmp39, tmp40) tmp42 = tmp36 + tmp41 tl.store(out_ptr0 + (x5), tmp42, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/72/c72ko7bwrnxcz33klyfokecvp4hnpe3scztiorsnr3founy7q6m3.py # Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator] # Source node to ATen node mapping: # affine_grid => mul_4, sum_1 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %unsqueeze), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-2]), kwargs = {}) triton_poi_fused_affine_grid_generator_1 = async_compile.triton('triton_poi_fused_affine_grid_generator_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_affine_grid_generator_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_affine_grid_generator_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 460800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 2) % 57600 x0 = xindex % 2 x2 = (xindex // 115200) x3 = xindex tmp0 = tl.load(in_ptr0 + (3*x1), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + ((3*x0) + (6*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (3*x1)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (3*x0) + (6*x2)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (3*x1)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (3*x0) + (6*x2)), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tl.store(out_ptr0 + (x3), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 2, 3), (6, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((240, 240, 3), (720, 3, 1), torch.float32) # Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator] stream0 = get_raw_stream(0) triton_poi_fused_affine_grid_generator_0.run(buf1, 172800, grid=grid(172800), stream=stream0) buf2 = empty_strided_cuda((4, 57600, 2), (115200, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [affine_grid], Original ATen: [aten.affine_grid_generator] triton_poi_fused_affine_grid_generator_1.run(buf1, arg0_1, buf2, 460800, grid=grid(460800), stream=stream0) del arg0_1 del buf1 return (reinterpret_tensor(buf2, (4, 240, 240, 2), (115200, 480, 2, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 2, 3), (6, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
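The hard-coded constants in the first kernel are the align_corners=False normalized sampling coordinates for a 240-pixel axis: the step 0.008333... is 2/240 and the offset 0.9958333... is 239/240, i.e. x_i = (2i + 1)/240 - 1. For indices past the midpoint the kernel computes the same value from the far end of the axis (0.9958333... - (239 - i) * 0.008333...), which is algebraically identical. A minimal sketch verifying that reading against eager PyTorch (illustrative, not part of the generated module):

import torch
import torch.nn.functional as F

W = 240
i = torch.arange(W, dtype=torch.float32)
coords = i * (2.0 / W) - (W - 1) / W  # matches the kernel's 0.008333... and -0.9958333...
theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # identity affine map
grid = F.affine_grid(theta, (1, 3, W, W), align_corners=False)
assert torch.allclose(grid[0, 0, :, 0], coords, atol=1e-6)  # x-coords along the first row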
from torch.nn import Module import torch import torch.nn.functional as F import torch.nn from torch.nn.modules.module import Module class AffineGridGen(Module): def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True): super(AffineGridGen, self).__init__() self.out_h = out_h self.out_w = out_w self.out_ch = out_ch def forward(self, theta): b = theta.size()[0] if not theta.size() == (b, 2, 3): theta = theta.view(-1, 2, 3) theta = theta.contiguous() batch_size = theta.size()[0] out_size = torch.Size((batch_size, self.out_ch, self.out_h, self.out_w) ) return F.affine_grid(theta, out_size) def get_inputs(): return [torch.rand([4, 2, 3])] def get_init_inputs(): return [[], {}]
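The grid this module produces is the standard input to F.grid_sample; a minimal usage sketch (tensor values are illustrative):

import torch
import torch.nn.functional as F

gen = AffineGridGen(out_h=240, out_w=240, out_ch=3)
theta = torch.tensor([[[1.0, 0.0, 0.1],
                       [0.0, 1.0, 0.0]]])  # identity plus a small x translation
grid = gen(theta)                          # shape (1, 240, 240, 2)
image = torch.rand(1, 3, 240, 240)
warped = F.grid_sample(image, grid, align_corners=False)
assert warped.shape == (1, 3, 240, 240)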
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import torch.nn
from torch.nn.modules.module import Module

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_affine_grid_generator_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Materialize the (240, 240, 3) base grid of homogeneous coordinates
    # (x, y, 1) in align_corners=False normalized space.
    xnumel = 172800
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 3
    x1 = xindex // 3 % 240
    x2 = xindex // 720
    x5 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 1, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = x1
    tmp4 = tmp3.to(tl.float32)
    tmp5 = 120.0
    tmp6 = tmp4 < tmp5
    tmp7 = 0.008333333333333333
    tmp8 = tmp4 * tmp7
    tmp9 = -0.9958333333333333
    tmp10 = tmp8 + tmp9
    tmp11 = 239 + -1 * x1
    tmp12 = tmp11.to(tl.float32)
    tmp13 = tmp12 * tmp7
    tmp14 = 0.9958333333333333
    tmp15 = tmp14 - tmp13
    tmp16 = tl.where(tmp6, tmp10, tmp15)
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp2, tmp16, tmp17)
    tmp19 = -1 + x0
    tmp20 = tl.full([1], 0, tl.int64)
    tmp21 = tmp19 >= tmp20
    tmp22 = tmp19 < tmp1
    tmp23 = tmp21 & tmp22
    tmp24 = x2
    tmp25 = tmp24.to(tl.float32)
    tmp26 = tmp25 < tmp5
    tmp27 = tmp25 * tmp7
    tmp28 = tmp27 + tmp9
    tmp29 = 239 + -1 * x2
    tmp30 = tmp29.to(tl.float32)
    tmp31 = tmp30 * tmp7
    tmp32 = tmp14 - tmp31
    tmp33 = tl.where(tmp26, tmp28, tmp32)
    tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
    tmp35 = tl.where(tmp23, tmp33, tmp34)
    tmp36 = tmp18 + tmp35
    tmp37 = -2 + x0
    tmp38 = tmp37 >= tmp20
    tmp39 = 1.0
    tmp40 = tl.full(tmp39.shape, 0.0, tmp39.dtype)
    tmp41 = tl.where(tmp38, tmp39, tmp40)
    tmp42 = tmp36 + tmp41
    tl.store(out_ptr0 + x5, tmp42, xmask)


@triton.jit
def triton_poi_fused_affine_grid_generator_1(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    # Batched (base_grid @ theta^T): each output element is the dot product
    # of one homogeneous coordinate row with one row of theta. All 460800
    # elements are covered exactly by the launch grid, so no bounds mask is
    # needed (xnumel is kept only for signature compatibility).
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    x1 = xindex // 2 % 57600
    x0 = xindex % 2
    x2 = xindex // 115200
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + 3 * x1, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (3 * x0 + 6 * x2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 3 * x1), None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 3 * x0 + 6 * x2), None, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 3 * x1), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 3 * x0 + 6 * x2), None, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tl.store(out_ptr0 + x3, tmp10, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 2, 3), (6, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((240, 240, 3), (720, 3, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_affine_grid_generator_0[grid(172800)](buf1, 172800,
            XBLOCK=512, num_warps=8, num_stages=1)
        buf2 = empty_strided_cuda((4, 57600, 2), (115200, 2, 1), torch.float32)
        triton_poi_fused_affine_grid_generator_1[grid(460800)](buf1, arg0_1,
            buf2, 460800, XBLOCK=1024, num_warps=4, num_stages=1)
        del arg0_1
        del buf1
    return reinterpret_tensor(buf2, (4, 240, 240, 2), (115200, 480, 2, 1), 0),


class AffineGridGenNew(Module):

    def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True):
        super(AffineGridGenNew, self).__init__()
        self.out_h = out_h
        self.out_w = out_w
        self.out_ch = out_ch

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
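A sanity check of the compiled path against eager F.affine_grid (CUDA required; a sketch, not part of the generated module):

import torch
import torch.nn.functional as F

theta = torch.rand(4, 2, 3, device='cuda')
compiled = AffineGridGenNew()(theta)
eager = F.affine_grid(theta, torch.Size((4, 3, 240, 240)))
assert compiled.shape == eager.shape == (4, 240, 240, 2)
assert torch.allclose(compiled, eager, atol=1e-5)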
JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching
AffineGridGen
false
5,412
[ "MIT" ]
1
b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
https://github.com/JiwonCocoder/-Joint-Learning-of-Feature-Extraction-and-Cost-Aggregation-for-Semantic-Matching/tree/b79e0e20fd5a1a9ddc0ffa9d7a92e0ebd21018b9
from torch.nn import Module import torch import torch.nn.functional as F import torch.nn from torch.nn.modules.module import Module class Model(Module): def __init__(self, out_h=240, out_w=240, out_ch=3, use_cuda=True): super().__init__() self.out_h = out_h self.out_w = out_w self.out_ch = out_ch def forward(self, theta): b = theta.size()[0] if not theta.size() == (b, 2, 3): theta = theta.view(-1, 2, 3) theta = theta.contiguous() batch_size = theta.size()[0] out_size = torch.Size((batch_size, self.out_ch, self.out_h, self.out_w) ) return F.affine_grid(theta, out_size) def get_inputs(): return [torch.rand([4, 2, 3])] def get_init_inputs(): return []
CMDS_Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/if/cifzfs6xjtoe3hyibakxibge2wf4dwkajxdfqdom3v4wsid4im3f.py # Topologically Sorted Source Nodes: [mean, m], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # m => sub # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%permute, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %mean), kwargs = {}) triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xy/cxyejw3v2tom45hbokfxiuszvwckow3ssrglcihjalzfzebxxrge.py # Topologically Sorted Source Nodes: [XTX, YTY, sub_2, norm, cmds], Original ATen: [aten.mul, aten.sub, aten.linalg_vector_norm, aten.pow] # Source node to ATen node mapping: # XTX => mul # YTY => mul_1 # cmds => pow_3 # norm => pow_1, pow_2, sum_1 # sub_2 => sub_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.25), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 0.25), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) triton_per_fused_linalg_vector_norm_mul_pow_sub_1 = async_compile.triton('triton_per_fused_linalg_vector_norm_mul_pow_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mul_pow_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_linalg_vector_norm_mul_pow_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, 
RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = libdevice.sqrt(tmp9) tmp11 = tmp10 * tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [mean, m], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mean_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), out=buf1) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean_1, m_1], Original ATen: [aten.mean, aten.sub] triton_poi_fused_mean_sub_0.run(arg1_1, buf2, 16, grid=grid(16), stream=stream0) del arg1_1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(buf2, (4, 4), (4, 1), 0), out=buf3) del buf2 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [XTX, YTY, sub_2, norm, cmds], Original ATen: [aten.mul, aten.sub, aten.linalg_vector_norm, aten.pow] triton_per_fused_linalg_vector_norm_mul_pow_sub_1.run(buf5, buf1, buf3, 1, 16, grid=grid(1), stream=stream0) del buf1 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
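The 0.25 baked into the reduction kernel is the bias=True covariance normalization 1/n with n = 4 observations, so the fused graph computes the squared Frobenius norm of the difference of the two scaled Gram matrices; the sqrt followed by squaring comes from torch.norm(...) ** 2 in the source module. A worked eager-mode check of that algebra (a sketch):

import torch

x = torch.rand(4, 4)
y = torch.rand(4, 4)
cx = x.T - x.T.mean(dim=1, keepdim=True)  # center each row of x.T
cy = y.T - y.T.mean(dim=1, keepdim=True)
XTX = 0.25 * (cx @ cx.T)  # bias=True covariance: divide by n = 4
YTY = 0.25 * (cy @ cy.T)
loss = torch.norm(XTX - YTY) ** 2
assert torch.allclose(loss, ((XTX - YTY) ** 2).sum(), atol=1e-5)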
import torch
from torch import nn
from sklearn.preprocessing import scale as scale


def Covariance(m, bias=False, rowvar=True, inplace=False):
    """Estimate a covariance matrix given data (tensor).

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element `C_{ij}` is the covariance of
    `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.

    Args:
        m: torch.Tensor - A 1-D or 2-D tensor containing multiple variables
            and observations. Each row of `m` represents a variable, and
            each column a single observation of all those variables.
        bias: bool - If True, normalize by n; otherwise by n - 1.
        rowvar: bool - If `rowvar` is True, then each row represents a
            variable, with observations in the columns. Otherwise, the
            relationship is transposed: each column represents a variable,
            while the rows contain observations.
        inplace: bool - If True, center `m` in place.

    Returns:
        The covariance matrix of the variables.
    """
    if m.dim() > 2:
        raise ValueError('m has more than 2 dimensions')
    if m.dim() < 2:
        m = m.view(1, -1)
    if not rowvar and m.size(0) != 1:
        m = m.t()
    fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / m.size(1)
    if inplace:
        m -= torch.mean(m, dim=1, keepdim=True)
    else:
        m = m - torch.mean(m, dim=1, keepdim=True)
    mt = m.t()
    return fact * m.matmul(mt).squeeze()


class CMDS_Loss(nn.Module):
    """Equation (1) in Self-calibrating Neural Networks for Dimensionality
    Reduction.

    Args:
        y: tensor - encoded data.
        x: tensor - original data.

    Returns:
        cmds: float - The CMDS loss.
    """

    def __init__(self):
        super(CMDS_Loss, self).__init__()

    def forward(self, y, x):
        XTX = Covariance(x.T, bias=True)
        YTY = Covariance(y.T, bias=True)
        cmds = torch.norm(XTX - YTY) ** 2
        return cmds


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
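For reference, Covariance agrees with the built-in torch.cov when bias is mapped onto the correction argument (a sketch, assuming PyTorch >= 1.10 where torch.cov was added):

import torch

m = torch.rand(4, 10)
assert torch.allclose(Covariance(m, bias=True), torch.cov(m, correction=0), atol=1e-6)
assert torch.allclose(Covariance(m, bias=False), torch.cov(m, correction=1), atol=1e-6)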
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from sklearn.preprocessing import scale as scale

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Center each column of the 4x4 input; the (1, 4)-strided output buffer
    # realizes the transpose, so this produces the mean-centered x.T.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_per_fused_linalg_vector_norm_mul_pow_sub_1(in_out_ptr0, in_ptr0,
        in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program reduction over all 16 elements: sum of squared
    # differences of the 0.25-scaled Gram matrices, then sqrt followed by
    # squaring, mirroring torch.norm(XTX - YTY) ** 2. xnumel and rnumel are
    # kept only for signature compatibility with the launch site.
    RBLOCK: tl.constexpr = 16
    rindex = tl.arange(0, RBLOCK)[None, :]
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 0.25
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp10 = libdevice.sqrt(tmp9)
    tmp11 = tmp10 * tmp10
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
            out=buf1)
        buf2 = buf0
        del buf0
        triton_poi_fused_mean_sub_0[grid(16)](arg1_1, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg1_1
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(buf2, (4, 4), (4, 1), 0),
            out=buf3)
        del buf2
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused_linalg_vector_norm_mul_pow_sub_1[grid(1)](buf5,
            buf1, buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf1
        del buf3
    return buf5,


def Covariance(m, bias=False, rowvar=True, inplace=False):
    """Estimate a covariance matrix given data (tensor).

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element `C_{ij}` is the covariance of
    `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.

    Args:
        m: torch.Tensor - A 1-D or 2-D tensor containing multiple variables
            and observations. Each row of `m` represents a variable, and
            each column a single observation of all those variables.
        bias: bool - If True, normalize by n; otherwise by n - 1.
        rowvar: bool - If `rowvar` is True, then each row represents a
            variable, with observations in the columns. Otherwise, the
            relationship is transposed: each column represents a variable,
            while the rows contain observations.
        inplace: bool - If True, center `m` in place.

    Returns:
        The covariance matrix of the variables.
    """
    if m.dim() > 2:
        raise ValueError('m has more than 2 dimensions')
    if m.dim() < 2:
        m = m.view(1, -1)
    if not rowvar and m.size(0) != 1:
        m = m.t()
    fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / m.size(1)
    if inplace:
        m -= torch.mean(m, dim=1, keepdim=True)
    else:
        m = m - torch.mean(m, dim=1, keepdim=True)
    mt = m.t()
    return fact * m.matmul(mt).squeeze()


class CMDS_LossNew(nn.Module):
    """Equation (1) in Self-calibrating Neural Networks for Dimensionality
    Reduction.

    Args:
        input_0: tensor - encoded data (y).
        input_1: tensor - original data (x).

    Returns:
        cmds: float - The CMDS loss.
    """

    def __init__(self):
        super(CMDS_LossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
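A quick cross-check of the compiled wrapper against the eager CMDS_Loss from the previous listing (CUDA required; a sketch that assumes both classes are importable side by side):

import torch

y = torch.rand(4, 4, device='cuda')
x = torch.rand(4, 4, device='cuda')
assert torch.allclose(CMDS_Loss()(y, x), CMDS_LossNew()(y, x), atol=1e-4)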
Gustoaxel/Statistical-autoencoder
CMDS_Loss
false
5,413
[ "MIT" ]
1
f3328f9c2a45ef0f7fe4adf98af4a64d02d34afc
https://github.com/Gustoaxel/Statistical-autoencoder/tree/f3328f9c2a45ef0f7fe4adf98af4a64d02d34afc
import torch
from torch import nn
from sklearn.preprocessing import scale as scale


def Covariance(m, bias=False, rowvar=True, inplace=False):
    """Estimate a covariance matrix given data (tensor).

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element `C_{ij}` is the covariance of
    `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.

    Args:
        m: torch.Tensor - A 1-D or 2-D tensor containing multiple variables
            and observations. Each row of `m` represents a variable, and
            each column a single observation of all those variables.
        bias: bool - If True, normalize by n; otherwise by n - 1.
        rowvar: bool - If `rowvar` is True, then each row represents a
            variable, with observations in the columns. Otherwise, the
            relationship is transposed: each column represents a variable,
            while the rows contain observations.
        inplace: bool - If True, center `m` in place.

    Returns:
        The covariance matrix of the variables.
    """
    if m.dim() > 2:
        raise ValueError('m has more than 2 dimensions')
    if m.dim() < 2:
        m = m.view(1, -1)
    if not rowvar and m.size(0) != 1:
        m = m.t()
    fact = 1.0 / (m.size(1) - 1) if not bias else 1.0 / m.size(1)
    if inplace:
        m -= torch.mean(m, dim=1, keepdim=True)
    else:
        m = m - torch.mean(m, dim=1, keepdim=True)
    mt = m.t()
    return fact * m.matmul(mt).squeeze()


class Model(nn.Module):
    """Equation (1) in Self-calibrating Neural Networks for Dimensionality
    Reduction.

    Args:
        y: tensor - encoded data.
        x: tensor - original data.

    Returns:
        cmds: float - The CMDS loss.
    """

    def __init__(self):
        super().__init__()

    def forward(self, y, x):
        XTX = Covariance(x.T, bias=True)
        YTY = Covariance(y.T, bias=True)
        cmds = torch.norm(XTX - YTY) ** 2
        return cmds


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return []
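Since torch.norm(...) ** 2 computes a square root only to undo it, an equivalent form skips the round trip; behavior is identical up to floating-point error (a sketch using the Covariance helper above, with an illustrative function name):

def cmds_loss(y, x):
    # Squared Frobenius norm of the covariance difference, without sqrt.
    XTX = Covariance(x.T, bias=True)
    YTY = Covariance(y.T, bias=True)
    return ((XTX - YTY) ** 2).sum()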
NNMerge
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/pk/cpkt7a6rcm5naxvivdyxizhuu5o53uzxer5jbfqti5motnome27d.py # Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum] # Source node to ATen node mapping: # sum_1 => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [-2]), kwargs = {}) triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 
= tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NNMerge(nn.Module): def __init__(self): super(NNMerge, self).__init__() def forward(self, x): """ (k,D) -> (D,) """ return torch.sum(x, -2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
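The sum collapses the second-to-last axis, so any leading batch dimensions are preserved: a (..., k, D) input becomes (..., D). A one-line shape check:

import torch

x = torch.rand(4, 4, 4, 4)
out = NNMerge()(x)
assert out.shape == (4, 4, 4) and torch.allclose(out, x.sum(dim=-2))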
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class NNMergeNew(nn.Module): def __init__(self): super(NNMergeNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Justin-Yuan/learn-to-interact
NNMerge
false
5,414
[ "MIT" ]
1
eb013bb3bab269bda8a8075e64fe3bcd2964d8ae
https://github.com/Justin-Yuan/learn-to-interact/tree/eb013bb3bab269bda8a8075e64fe3bcd2964d8ae
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): """ (k,D) -> (D,) """ return torch.sum(x, -2) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []